code_raw | code_clean | docstring |
---|---|---|
def find_for_task_instance(task_instance, session):
"""
Returns all task reschedules for the task instance and try number,
in ascending order.
:param task_instance: the task instance to find task reschedules for
:type task_instance: airflow.models.TaskInstance
"""
TR = TaskReschedule
return (
session
.query(TR)
.filter(TR.dag_id == task_instance.dag_id,
TR.task_id == task_instance.task_id,
TR.execution_date == task_instance.execution_date,
TR.try_number == task_instance.try_number)
.order_by(asc(TR.id))
.all()
) | def find_for_task_instance(task_instance, session):
TR = TaskReschedule
return (
session
.query(TR)
.filter(TR.dag_id == task_instance.dag_id,
TR.task_id == task_instance.task_id,
TR.execution_date == task_instance.execution_date,
TR.try_number == task_instance.try_number)
.order_by(asc(TR.id))
.all()
) | Returns all task reschedules for the task instance and try number,
in ascending order.
:param task_instance: the task instance to find task reschedules for
:type task_instance: airflow.models.TaskInstance |
def remove_option(self, section, option, remove_default=True):
"""
Remove an option if it exists in the config loaded from a file or in the
default config. If both configs have the same option, this removes
the option from both unless remove_default=False.
"""
if super().has_option(section, option):
super().remove_option(section, option)
if self.airflow_defaults.has_option(section, option) and remove_default:
self.airflow_defaults.remove_option(section, option) | def remove_option(self, section, option, remove_default=True):
if super().has_option(section, option):
super().remove_option(section, option)
if self.airflow_defaults.has_option(section, option) and remove_default:
self.airflow_defaults.remove_option(section, option) | Remove an option if it exists in the config loaded from a file or in the
default config. If both configs have the same option, this removes
the option from both unless remove_default=False. |
def getsection(self, section):
"""
Returns the section as a dict. Values are converted to int, float, bool
as required.
:param section: section from the config
:rtype: dict
"""
if (section not in self._sections and
section not in self.airflow_defaults._sections):
return None
_section = copy.deepcopy(self.airflow_defaults._sections[section])
if section in self._sections:
_section.update(copy.deepcopy(self._sections[section]))
section_prefix = 'AIRFLOW__{S}__'.format(S=section.upper())
for env_var in sorted(os.environ.keys()):
if env_var.startswith(section_prefix):
key = env_var.replace(section_prefix, '').lower()
_section[key] = self._get_env_var_option(section, key)
for key, val in iteritems(_section):
try:
val = int(val)
except ValueError:
try:
val = float(val)
except ValueError:
if val.lower() in ('t', 'true'):
val = True
elif val.lower() in ('f', 'false'):
val = False
_section[key] = val
return _section | def getsection(self, section):
if (section not in self._sections and
section not in self.airflow_defaults._sections):
return None
_section = copy.deepcopy(self.airflow_defaults._sections[section])
if section in self._sections:
_section.update(copy.deepcopy(self._sections[section]))
section_prefix = 'AIRFLOW__{S}__'.format(S=section.upper())
for env_var in sorted(os.environ.keys()):
if env_var.startswith(section_prefix):
key = env_var.replace(section_prefix, '').lower()
_section[key] = self._get_env_var_option(section, key)
for key, val in iteritems(_section):
try:
val = int(val)
except ValueError:
try:
val = float(val)
except ValueError:
if val.lower() in ('t', 'true'):
val = True
elif val.lower() in ('f', 'false'):
val = False
_section[key] = val
return _section | Returns the section as a dict. Values are converted to int, float, bool
as required.
:param section: section from the config
:rtype: dict |
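A minimal usage sketch of getsection, assuming an already-initialised AirflowConfigParser instance named conf; the section and key are illustrative. It shows the AIRFLOW__<SECTION>__<KEY> environment-variable override and the int coercion performed by the loop above.

import os

os.environ['AIRFLOW__CORE__PARALLELISM'] = '64'   # overrides file and default values
core = conf.getsection('core')                    # conf is a hypothetical parser instance
assert core['parallelism'] == 64                  # '64' coerced to int by getsection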
def allocate_ids(self, partial_keys):
"""
Allocate IDs for incomplete keys.
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/allocateIds
:param partial_keys: a list of partial keys.
:type partial_keys: list
:return: a list of full keys.
:rtype: list
"""
conn = self.get_conn()
resp = (conn
.projects()
.allocateIds(projectId=self.project_id, body={'keys': partial_keys})
.execute(num_retries=self.num_retries))
return resp['keys'] | def allocate_ids(self, partial_keys):
conn = self.get_conn()
resp = (conn
.projects()
.allocateIds(projectId=self.project_id, body={'keys': partial_keys})
.execute(num_retries=self.num_retries))
return resp['keys'] | Allocate IDs for incomplete keys.
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/allocateIds
:param partial_keys: a list of partial keys.
:type partial_keys: list
:return: a list of full keys.
:rtype: list |
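A hedged sketch of allocate_ids, assuming a Datastore hook instance named datastore_hook; the key layout follows the REST API's Key format (a path element with a kind but no id/name makes the key partial), and the kind 'Task' is a placeholder.

partial_keys = [
    {'path': [{'kind': 'Task'}]},   # partial: no 'id' or 'name' yet
    {'path': [{'kind': 'Task'}]},
]
full_keys = datastore_hook.allocate_ids(partial_keys)
# Each returned key should now carry an 'id' assigned by Datastore.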
def begin_transaction(self):
"""
Begins a new transaction.
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/beginTransaction
:return: a transaction handle.
:rtype: str
"""
conn = self.get_conn()
resp = (conn
.projects()
.beginTransaction(projectId=self.project_id, body={})
.execute(num_retries=self.num_retries))
return resp['transaction'] | def begin_transaction(self):
conn = self.get_conn()
resp = (conn
.projects()
.beginTransaction(projectId=self.project_id, body={})
.execute(num_retries=self.num_retries))
return resp['transaction'] | Begins a new transaction.
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/beginTransaction
:return: a transaction handle.
:rtype: str |
def commit(self, body):
"""
Commit a transaction, optionally creating, deleting or modifying some entities.
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/commit
:param body: the body of the commit request.
:type body: dict
:return: the response body of the commit request.
:rtype: dict
"""
conn = self.get_conn()
resp = (conn
.projects()
.commit(projectId=self.project_id, body=body)
.execute(num_retries=self.num_retries))
return resp | def commit(self, body):
conn = self.get_conn()
resp = (conn
.projects()
.commit(projectId=self.project_id, body=body)
.execute(num_retries=self.num_retries))
return resp | Commit a transaction, optionally creating, deleting or modifying some entities.
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/commit
:param body: the body of the commit request.
:type body: dict
:return: the response body of the commit request.
:rtype: dict |
def lookup(self, keys, read_consistency=None, transaction=None):
"""
Look up some entities by key.
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/lookup
:param keys: the keys to look up.
:type keys: list
:param read_consistency: the read consistency to use. default, strong or eventual.
Cannot be used with a transaction.
:type read_consistency: str
:param transaction: the transaction to use, if any.
:type transaction: str
:return: the response body of the lookup request.
:rtype: dict
"""
conn = self.get_conn()
body = {'keys': keys}
if read_consistency:
body['readConsistency'] = read_consistency
if transaction:
body['transaction'] = transaction
resp = (conn
.projects()
.lookup(projectId=self.project_id, body=body)
.execute(num_retries=self.num_retries))
return resp | def lookup(self, keys, read_consistency=None, transaction=None):
conn = self.get_conn()
body = {'keys': keys}
if read_consistency:
body['readConsistency'] = read_consistency
if transaction:
body['transaction'] = transaction
resp = (conn
.projects()
.lookup(projectId=self.project_id, body=body)
.execute(num_retries=self.num_retries))
return resp | Look up some entities by key.
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/lookup
:param keys: the keys to look up.
:type keys: list
:param read_consistency: the read consistency to use. default, strong or eventual.
Cannot be used with a transaction.
:type read_consistency: str
:param transaction: the transaction to use, if any.
:type transaction: str
:return: the response body of the lookup request.
:rtype: dict |
def rollback(self, transaction):
"""
Roll back a transaction.
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/rollback
:param transaction: the transaction to roll back.
:type transaction: str
"""
conn = self.get_conn()
conn.projects().rollback(
projectId=self.project_id, body={'transaction': transaction}
).execute(num_retries=self.num_retries) | def rollback(self, transaction):
conn = self.get_conn()
conn.projects().rollback(
projectId=self.project_id, body={'transaction': transaction}
).execute(num_retries=self.num_retries) | Roll back a transaction.
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/rollback
:param transaction: the transaction to roll back.
:type transaction: str |
def run_query(self, body):
"""
Run a query for entities.
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/runQuery
:param body: the body of the query request.
:type body: dict
:return: the batch of query results.
:rtype: dict
"""
conn = self.get_conn()
resp = (conn
.projects()
.runQuery(projectId=self.project_id, body=body)
.execute(num_retries=self.num_retries))
return resp['batch'] | def run_query(self, body):
conn = self.get_conn()
resp = (conn
.projects()
.runQuery(projectId=self.project_id, body=body)
.execute(num_retries=self.num_retries))
return resp['batch'] | Run a query for entities.
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/runQuery
:param body: the body of the query request.
:type body: dict
:return: the batch of query results.
:rtype: dict |
def get_operation(self, name):
"""
Gets the latest state of a long-running operation.
.. seealso::
https://cloud.google.com/datastore/docs/reference/data/rest/v1/projects.operations/get
:param name: the name of the operation resource.
:type name: str
:return: a resource operation instance.
:rtype: dict
"""
conn = self.get_conn()
resp = (conn
.projects()
.operations()
.get(name=name)
.execute(num_retries=self.num_retries))
return resp | def get_operation(self, name):
conn = self.get_conn()
resp = (conn
.projects()
.operations()
.get(name=name)
.execute(num_retries=self.num_retries))
return resp | Gets the latest state of a long-running operation.
.. seealso::
https://cloud.google.com/datastore/docs/reference/data/rest/v1/projects.operations/get
:param name: the name of the operation resource.
:type name: str
:return: a resource operation instance.
:rtype: dict |
def delete_operation(self, name):
"""
Deletes the long-running operation.
.. seealso::
https://cloud.google.com/datastore/docs/reference/data/rest/v1/projects.operations/delete
:param name: the name of the operation resource.
:type name: str
:return: none if successful.
:rtype: dict
"""
conn = self.get_conn()
resp = (conn
.projects()
.operations()
.delete(name=name)
.execute(num_retries=self.num_retries))
return resp | def delete_operation(self, name):
conn = self.get_conn()
resp = (conn
.projects()
.operations()
.delete(name=name)
.execute(num_retries=self.num_retries))
return resp | Deletes the long-running operation.
.. seealso::
https://cloud.google.com/datastore/docs/reference/data/rest/v1/projects.operations/delete
:param name: the name of the operation resource.
:type name: str
:return: none if successful.
:rtype: dict |
def poll_operation_until_done(self, name, polling_interval_in_seconds):
"""
Poll backup operation state until it's completed.
:param name: the name of the operation resource
:type name: str
:param polling_interval_in_seconds: The number of seconds to wait before calling another request.
:type polling_interval_in_seconds: int
:return: a resource operation instance.
:rtype: dict
"""
while True:
result = self.get_operation(name)
state = result['metadata']['common']['state']
if state == 'PROCESSING':
self.log.info('Operation is processing. Re-polling state in {} seconds'
.format(polling_interval_in_seconds))
time.sleep(polling_interval_in_seconds)
else:
return result | def poll_operation_until_done(self, name, polling_interval_in_seconds):
while True:
result = self.get_operation(name)
state = result['metadata']['common']['state']
if state == 'PROCESSING':
self.log.info('Operation is processing. Re-polling state in {} seconds'
.format(polling_interval_in_seconds))
time.sleep(polling_interval_in_seconds)
else:
return result | Poll backup operation state until it's completed.
:param name: the name of the operation resource
:type name: str
:param polling_interval_in_seconds: The number of seconds to wait before calling another request.
:type polling_interval_in_seconds: int
:return: a resource operation instance.
:rtype: dict |
def export_to_storage_bucket(self, bucket, namespace=None, entity_filter=None, labels=None):
"""
Export entities from Cloud Datastore to Cloud Storage for backup.
.. note::
Keep in mind that this requests the Admin API not the Data API.
.. seealso::
https://cloud.google.com/datastore/docs/reference/admin/rest/v1/projects/export
:param bucket: The name of the Cloud Storage bucket.
:type bucket: str
:param namespace: The Cloud Storage namespace path.
:type namespace: str
:param entity_filter: Description of what data from the project is included in the export.
:type entity_filter: dict
:param labels: Client-assigned labels.
:type labels: dict of str
:return: a resource operation instance.
:rtype: dict
"""
admin_conn = self.get_conn()
output_uri_prefix = 'gs://' + '/'.join(filter(None, [bucket, namespace]))
if not entity_filter:
entity_filter = {}
if not labels:
labels = {}
body = {
'outputUrlPrefix': output_uri_prefix,
'entityFilter': entity_filter,
'labels': labels,
}
resp = (admin_conn
.projects()
.export(projectId=self.project_id, body=body)
.execute(num_retries=self.num_retries))
return resp | def export_to_storage_bucket(self, bucket, namespace=None, entity_filter=None, labels=None):
admin_conn = self.get_conn()
output_uri_prefix = 'gs://' + '/'.join(filter(None, [bucket, namespace]))
if not entity_filter:
entity_filter = {}
if not labels:
labels = {}
body = {
'outputUrlPrefix': output_uri_prefix,
'entityFilter': entity_filter,
'labels': labels,
}
resp = (admin_conn
.projects()
.export(projectId=self.project_id, body=body)
.execute(num_retries=self.num_retries))
return resp | Export entities from Cloud Datastore to Cloud Storage for backup.
.. note::
Keep in mind that this requests the Admin API not the Data API.
.. seealso::
https://cloud.google.com/datastore/docs/reference/admin/rest/v1/projects/export
:param bucket: The name of the Cloud Storage bucket.
:type bucket: str
:param namespace: The Cloud Storage namespace path.
:type namespace: str
:param entity_filter: Description of what data from the project is included in the export.
:type entity_filter: dict
:param labels: Client-assigned labels.
:type labels: dict of str
:return: a resource operation instance.
:rtype: dict |
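A hedged usage sketch of export_to_storage_bucket, assuming a hook instance named datastore_hook; the entityFilter keys ('kinds', 'namespaceIds') mirror the Admin API's EntityFilter message, and the bucket, kind and label values are placeholders.

operation = datastore_hook.export_to_storage_bucket(
    bucket='my-backup-bucket',        # written to gs://my-backup-bucket/nightly
    namespace='nightly',
    entity_filter={'kinds': ['Task'], 'namespaceIds': ['']},
    labels={'source': 'airflow'},
)
# The returned dict is a long-running operation; its 'name' can be passed to
# poll_operation_until_done to wait for the export to finish.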
def import_from_storage_bucket(self, bucket, file, namespace=None, entity_filter=None, labels=None):
"""
Import a backup from Cloud Storage to Cloud Datastore.
.. note::
Keep in mind that this requests the Admin API not the Data API.
.. seealso::
https://cloud.google.com/datastore/docs/reference/admin/rest/v1/projects/import
:param bucket: The name of the Cloud Storage bucket.
:type bucket: str
:param file: the metadata file written by the projects.export operation.
:type file: str
:param namespace: The Cloud Storage namespace path.
:type namespace: str
:param entity_filter: specify which kinds/namespaces are to be imported.
:type entity_filter: dict
:param labels: Client-assigned labels.
:type labels: dict of str
:return: a resource operation instance.
:rtype: dict
"""
admin_conn = self.get_conn()
input_url = 'gs://' + '/'.join(filter(None, [bucket, namespace, file]))
if not entity_filter:
entity_filter = {}
if not labels:
labels = {}
body = {
'inputUrl': input_url,
'entityFilter': entity_filter,
'labels': labels,
}
resp = (admin_conn
.projects()
.import_(projectId=self.project_id, body=body)
.execute(num_retries=self.num_retries))
return resp | def import_from_storage_bucket(self, bucket, file, namespace=None, entity_filter=None, labels=None):
admin_conn = self.get_conn()
input_url = 'gs://' + '/'.join(filter(None, [bucket, namespace, file]))
if not entity_filter:
entity_filter = {}
if not labels:
labels = {}
body = {
'inputUrl': input_url,
'entityFilter': entity_filter,
'labels': labels,
}
resp = (admin_conn
.projects()
.import_(projectId=self.project_id, body=body)
.execute(num_retries=self.num_retries))
return resp | Import a backup from Cloud Storage to Cloud Datastore.
.. note::
Keep in mind that this requests the Admin API not the Data API.
.. seealso::
https://cloud.google.com/datastore/docs/reference/admin/rest/v1/projects/import
:param bucket: The name of the Cloud Storage bucket.
:type bucket: str
:param file: the metadata file written by the projects.export operation.
:type file: str
:param namespace: The Cloud Storage namespace path.
:type namespace: str
:param entity_filter: specify which kinds/namespaces are to be imported.
:type entity_filter: dict
:param labels: Client-assigned labels.
:type labels: dict of str
:return: a resource operation instance.
:rtype: dict |
def publish_to_target(self, target_arn, message):
"""
Publish a message to a topic or an endpoint.
:param target_arn: either a TopicArn or an EndpointArn
:type target_arn: str
:param message: the default message you want to send
:type message: str
"""
conn = self.get_conn()
messages = {
'default': message
}
return conn.publish(
TargetArn=target_arn,
Message=json.dumps(messages),
MessageStructure='json'
) | def publish_to_target(self, target_arn, message):
conn = self.get_conn()
messages = {
'default': message
}
return conn.publish(
TargetArn=target_arn,
Message=json.dumps(messages),
MessageStructure='json'
) | Publish a message to a topic or an endpoint.
:param target_arn: either a TopicArn or an EndpointArn
:type target_arn: str
:param message: the default message you want to send
:type message: str |
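A minimal sketch of publish_to_target, assuming an SNS hook instance named sns_hook; the ARN is a placeholder. As the code above shows, the message is wrapped as {"default": ...} and published with MessageStructure='json', so the same text goes to every protocol subscribed to the topic.

sns_hook.publish_to_target(
    target_arn='arn:aws:sns:eu-west-1:123456789012:my-topic',   # placeholder ARN
    message='DAG run finished',
)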
def get_hostname():
"""
Fetch the hostname using the callable from the config or using
`socket.getfqdn` as a fallback.
"""
# First we attempt to fetch the callable path from the config.
try:
callable_path = conf.get('core', 'hostname_callable')
except AirflowConfigException:
callable_path = None
# Then we handle the case when the config is missing or empty. This is the
# default behavior.
if not callable_path:
return socket.getfqdn()
# Since we have a callable path, we try to import and run it next.
module_path, attr_name = callable_path.split(':')
module = importlib.import_module(module_path)
callable = getattr(module, attr_name)
return callable() | def get_hostname():
# First we attempt to fetch the callable path from the config.
try:
callable_path = conf.get('core', 'hostname_callable')
except AirflowConfigException:
callable_path = None
# Then we handle the case when the config is missing or empty. This is the
# default behavior.
if not callable_path:
return socket.getfqdn()
# Since we have a callable path, we try to import and run it next.
module_path, attr_name = callable_path.split(':')
module = importlib.import_module(module_path)
callable = getattr(module, attr_name)
return callable() | Fetch the hostname using the callable from the config or using
`socket.getfqdn` as a fallback. |
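A short sketch of the callable-path convention get_hostname expects: the [core] hostname_callable value is "module:attribute", split on the colon and imported. The config snippet is illustrative; the last three lines mirror what get_hostname does with that value and are runnable on their own.

# airflow.cfg
# [core]
# hostname_callable = socket:gethostname

import importlib
module_path, attr_name = 'socket:gethostname'.split(':')
hostname = getattr(importlib.import_module(module_path), attr_name)()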
def get_conn(self):
"""
Retrieves connection to Cloud Natural Language service.
:return: Cloud Natural Language service object
:rtype: google.cloud.language_v1.LanguageServiceClient
"""
if not self._conn:
self._conn = LanguageServiceClient(credentials=self._get_credentials())
return self._conn | def get_conn(self):
if not self._conn:
self._conn = LanguageServiceClient(credentials=self._get_credentials())
return self._conn | Retrieves connection to Cloud Natural Language service.
:return: Cloud Natural Language service object
:rtype: google.cloud.language_v1.LanguageServiceClient |
def analyze_entities(self, document, encoding_type=None, retry=None, timeout=None, metadata=None):
"""
Finds named entities in the text along with entity types,
salience, mentions for each entity, and other properties.
:param document: Input document.
If a dict is provided, it must be of the same form as the protobuf message Document
:type document: dict or class google.cloud.language_v1.types.Document
:param encoding_type: The encoding type used by the API to calculate offsets.
:type encoding_type: google.cloud.language_v1.types.EncodingType
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: google.cloud.language_v1.types.AnalyzeEntitiesResponse
"""
client = self.get_conn()
return client.analyze_entities(
document=document, encoding_type=encoding_type, retry=retry, timeout=timeout, metadata=metadata
) | def analyze_entities(self, document, encoding_type=None, retry=None, timeout=None, metadata=None):
client = self.get_conn()
return client.analyze_entities(
document=document, encoding_type=encoding_type, retry=retry, timeout=timeout, metadata=metadata
) | Finds named entities in the text along with entity types,
salience, mentions for each entity, and other properties.
:param document: Input document.
If a dict is provided, it must be of the same form as the protobuf message Document
:type document: dict or class google.cloud.language_v1.types.Document
:param encoding_type: The encoding type used by the API to calculate offsets.
:type encoding_type: google.cloud.language_v1.types.EncodingType
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: google.cloud.language_v1.types.AnalyzeEntitiesResponse |
def annotate_text(self, document, features, encoding_type=None, retry=None, timeout=None, metadata=None):
"""
A convenience method that provides all the features that analyzeSentiment,
analyzeEntities, and analyzeSyntax provide in one call.
:param document: Input document.
If a dict is provided, it must be of the same form as the protobuf message Document
:type document: dict or google.cloud.language_v1.types.Document
:param features: The enabled features.
If a dict is provided, it must be of the same form as the protobuf message Features
:type features: dict or google.cloud.language_v1.enums.Features
:param encoding_type: The encoding type used by the API to calculate offsets.
:type encoding_type: google.cloud.language_v1.types.EncodingType
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: google.cloud.language_v1.types.AnnotateTextResponse
"""
client = self.get_conn()
return client.annotate_text(
document=document,
features=features,
encoding_type=encoding_type,
retry=retry,
timeout=timeout,
metadata=metadata,
) | def annotate_text(self, document, features, encoding_type=None, retry=None, timeout=None, metadata=None):
client = self.get_conn()
return client.annotate_text(
document=document,
features=features,
encoding_type=encoding_type,
retry=retry,
timeout=timeout,
metadata=metadata,
) | A convenience method that provides all the features that analyzeSentiment,
analyzeEntities, and analyzeSyntax provide in one call.
:param document: Input document.
If a dict is provided, it must be of the same form as the protobuf message Document
:type document: dict or google.cloud.language_v1.types.Document
:param features: The enabled features.
If a dict is provided, it must be of the same form as the protobuf message Features
:type features: dict or google.cloud.language_v1.enums.Features
:param encoding_type: The encoding type used by the API to calculate offsets.
:type encoding_type: google.cloud.language_v1.types.EncodingType
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: google.cloud.language_v1.types.AnnotateTextResponse |
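A hedged usage sketch of annotate_text, assuming a Cloud Natural Language hook instance named nl_hook; the dict forms of Document (content/type) and Features (extract_* flags) mirror the protobuf messages, and the sample text is arbitrary.

document = {'content': 'Airflow schedules workflows.', 'type': 'PLAIN_TEXT'}
features = {'extract_entities': True, 'extract_document_sentiment': True}
response = nl_hook.annotate_text(document=document, features=features)
# response.entities and response.document_sentiment hold the combined results.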
def classify_text(self, document, retry=None, timeout=None, metadata=None):
"""
Classifies a document into categories.
:param document: Input document.
If a dict is provided, it must be of the same form as the protobuf message Document
:type document: dict or class google.cloud.language_v1.types.Document
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: google.cloud.language_v1.types.ClassifyTextResponse
"""
client = self.get_conn()
return client.classify_text(document=document, retry=retry, timeout=timeout, metadata=metadata) | def classify_text(self, document, retry=None, timeout=None, metadata=None):
client = self.get_conn()
return client.classify_text(document=document, retry=retry, timeout=timeout, metadata=metadata) | Classifies a document into categories.
:param document: Input document.
If a dict is provided, it must be of the same form as the protobuf message Document
:type document: dict or class google.cloud.language_v1.types.Document
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: google.cloud.language_v1.types.ClassifyTextResponse |
def get_template_field(env, fullname):
"""
Gets template fields for a specific operator class.
:param fullname: Full path to operator class.
For example: ``airflow.contrib.operators.gcp_vision_operator.CloudVisionProductSetCreateOperator``
:return: List of template fields
:rtype: list[str]
"""
modname, classname = fullname.rsplit(".", 1)
try:
with mock(env.config.autodoc_mock_imports):
mod = import_module(modname)
except ImportError:
raise RoleException("Error loading %s module." % (modname, ))
clazz = getattr(mod, classname)
if not clazz:
raise RoleException("Error finding %s class in %s module." % (classname, modname))
template_fields = getattr(clazz, "template_fields")
if not template_fields:
raise RoleException(
"Could not find the template fields for %s class in %s module." % (classname, modname)
)
return list(template_fields) | def get_template_field(env, fullname):
modname, classname = fullname.rsplit(".", 1)
try:
with mock(env.config.autodoc_mock_imports):
mod = import_module(modname)
except ImportError:
raise RoleException("Error loading %s module." % (modname, ))
clazz = getattr(mod, classname)
if not clazz:
raise RoleException("Error finding %s class in %s module." % (classname, modname))
template_fields = getattr(clazz, "template_fields")
if not template_fields:
raise RoleException(
"Could not find the template fields for %s class in %s module." % (classname, modname)
)
return list(template_fields) | Gets template fields for a specific operator class.
:param fullname: Full path to operator class.
For example: ``airflow.contrib.operators.gcp_vision_operator.CloudVisionProductSetCreateOperator``
:return: List of template fields
:rtype: list[str] |
def template_field_role(app, typ, rawtext, text, lineno, inliner, options={}, content=[]):
"""
A role that allows you to include a list of template fields in the middle of the text. This is especially
useful when writing guides describing how to use the operator.
The result is a list of fields where each field is rendered as an inline literal.
Sample usage::
:template-fields:`airflow.contrib.operators.gcp_natural_language_operator.CloudLanguageAnalyzeSentimentOperator`
For further information, see:
* [Creating reStructuredText Interpreted Text Roles](http://docutils.sourceforge.net/docs/howto/rst-roles.html)
"""
text = utils.unescape(text)
try:
template_fields = get_template_field(app.env, text)
except RoleException as e:
msg = inliner.reporter.error("invalid class name %s \n%s" % (text, e, ), line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
node = nodes.inline(rawtext=rawtext)
for i, field in enumerate(template_fields):
if i != 0:
node += nodes.Text(", ")
node += nodes.literal(field, "", nodes.Text(field))
return [node], [] | def template_field_role(app, typ, rawtext, text, lineno, inliner, options={}, content=[]):
text = utils.unescape(text)
try:
template_fields = get_template_field(app.env, text)
except RoleException as e:
msg = inliner.reporter.error("invalid class name %s \n%s" % (text, e, ), line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
node = nodes.inline(rawtext=rawtext)
for i, field in enumerate(template_fields):
if i != 0:
node += nodes.Text(", ")
node += nodes.literal(field, "", nodes.Text(field))
return [node], [] | A role that allows you to include a list of template fields in the middle of the text. This is especially
useful when writing guides describing how to use the operator.
The result is a list of fields where each field is rendered as an inline literal.
Sample usage::
:template-fields:`airflow.contrib.operators.gcp_natural_language_operator.CloudLanguageAnalyzeSentimentOperator`
For further information, see:
* [Creating reStructuredText Interpreted Text Roles](http://docutils.sourceforge.net/docs/howto/rst-roles.html) |
def prepare_classpath():
"""
Ensures that certain subfolders of AIRFLOW_HOME are on the classpath
"""
if DAGS_FOLDER not in sys.path:
sys.path.append(DAGS_FOLDER)
# Add ./config/ for loading custom log parsers etc, or
# airflow_local_settings etc.
config_path = os.path.join(AIRFLOW_HOME, 'config')
if config_path not in sys.path:
sys.path.append(config_path)
if PLUGINS_FOLDER not in sys.path:
sys.path.append(PLUGINS_FOLDER) | def prepare_classpath():
if DAGS_FOLDER not in sys.path:
sys.path.append(DAGS_FOLDER)
# Add ./config/ for loading custom log parsers etc, or
# airflow_local_settings etc.
config_path = os.path.join(AIRFLOW_HOME, 'config')
if config_path not in sys.path:
sys.path.append(config_path)
if PLUGINS_FOLDER not in sys.path:
sys.path.append(PLUGINS_FOLDER) | Ensures that certain subfolders of AIRFLOW_HOME are on the classpath |
def _check_task_id(self, context):
"""
Gets the returned Celery result from the Airflow task
ID provided to the sensor, and returns True if the
Celery result has finished execution.
:param context: Airflow's execution context
:type context: dict
:return: True if task has been executed, otherwise False
:rtype: bool
"""
ti = context['ti']
celery_result = ti.xcom_pull(task_ids=self.target_task_id)
return celery_result.ready() | def _check_task_id(self, context):
ti = context['ti']
celery_result = ti.xcom_pull(task_ids=self.target_task_id)
return celery_result.ready() | Gets the returned Celery result from the Airflow task
ID provided to the sensor, and returns True if the
Celery result has finished execution.
:param context: Airflow's execution context
:type context: dict
:return: True if task has been executed, otherwise False
:rtype: bool |
def detect_conf_var():
"""Return true if the ticket cache contains "conf" information as is found
in ticket caches of Kerberos 1.8.1 or later. This is incompatible with the
Sun Java Krb5LoginModule in Java6, so we need to take an action to work
around it.
"""
ticket_cache = configuration.conf.get('kerberos', 'ccache')
with open(ticket_cache, 'rb') as f:
# Note: this file is binary, so we check against a bytearray.
return b'X-CACHECONF:' in f.read() | def detect_conf_var():
ticket_cache = configuration.conf.get('kerberos', 'ccache')
with open(ticket_cache, 'rb') as f:
# Note: this file is binary, so we check against a bytearray.
return b'X-CACHECONF:' in f.read() | Return true if the ticket cache contains "conf" information as is found
in ticket caches of Kerberos 1.8.1 or later. This is incompatible with the
Sun Java Krb5LoginModule in Java6, so we need to take an action to work
around it. |
def alchemy_to_dict(obj):
"""
Transforms a SQLAlchemy model instance into a dictionary
"""
if not obj:
return None
d = {}
for c in obj.__table__.columns:
value = getattr(obj, c.name)
if type(value) == datetime:
value = value.isoformat()
d[c.name] = value
return d | def alchemy_to_dict(obj):
if not obj:
return None
d = {}
for c in obj.__table__.columns:
value = getattr(obj, c.name)
if type(value) == datetime:
value = value.isoformat()
d[c.name] = value
return d | Transforms a SQLAlchemy model instance into a dictionary |
def chunks(items, chunk_size):
"""
Yield successive chunks of a given size from a list of items
"""
if chunk_size <= 0:
raise ValueError('Chunk size must be a positive integer')
for i in range(0, len(items), chunk_size):
yield items[i:i + chunk_size] | def chunks(items, chunk_size):
if chunk_size <= 0:
raise ValueError('Chunk size must be a positive integer')
for i in range(0, len(items), chunk_size):
yield items[i:i + chunk_size] | Yield successive chunks of a given size from a list of items |
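A quick usage example of chunks; the sample list is arbitrary.

list(chunks([1, 2, 3, 4, 5], 2))   # -> [[1, 2], [3, 4], [5]]
list(chunks([1, 2, 3], 0))         # -> raises ValueError once iterated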
def reduce_in_chunks(fn, iterable, initializer, chunk_size=0):
"""
Reduce the given list of items by splitting it into chunks
of the given size and passing each chunk through the reducer
"""
if len(iterable) == 0:
return initializer
if chunk_size == 0:
chunk_size = len(iterable)
return reduce(fn, chunks(iterable, chunk_size), initializer) | def reduce_in_chunks(fn, iterable, initializer, chunk_size=0):
if len(iterable) == 0:
return initializer
if chunk_size == 0:
chunk_size = len(iterable)
return reduce(fn, chunks(iterable, chunk_size), initializer) | Reduce the given list of items by splitting it into chunks
of the given size and passing each chunk through the reducer |
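A short example of reduce_in_chunks with a reducer that adds up chunk lengths; the inputs are arbitrary.

total = reduce_in_chunks(lambda acc, chunk: acc + len(chunk), [1, 2, 3, 4, 5], 0, chunk_size=2)
# chunks are [1, 2], [3, 4], [5], so total == 5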
def chain(*tasks):
"""
Given a number of tasks, builds a dependency chain.
chain(task_1, task_2, task_3, task_4)
is equivalent to
task_1.set_downstream(task_2)
task_2.set_downstream(task_3)
task_3.set_downstream(task_4)
"""
for up_task, down_task in zip(tasks[:-1], tasks[1:]):
up_task.set_downstream(down_task) | def chain(*tasks):
for up_task, down_task in zip(tasks[:-1], tasks[1:]):
up_task.set_downstream(down_task) | Given a number of tasks, builds a dependency chain.
chain(task_1, task_2, task_3, task_4)
is equivalent to
task_1.set_downstream(task_2)
task_2.set_downstream(task_3)
task_3.set_downstream(task_4) |
def pprinttable(rows):
"""Returns a pretty ascii table from tuples
If namedtuple are used, the table will have headers
"""
if not rows:
return
if hasattr(rows[0], '_fields'): # if namedtuple
headers = rows[0]._fields
else:
headers = ["col{}".format(i) for i in range(len(rows[0]))]
lens = [len(s) for s in headers]
for row in rows:
for i in range(len(rows[0])):
slen = len("{}".format(row[i]))
if slen > lens[i]:
lens[i] = slen
formats = []
hformats = []
for i in range(len(rows[0])):
if isinstance(rows[0][i], int):
formats.append("%%%dd" % lens[i])
else:
formats.append("%%-%ds" % lens[i])
hformats.append("%%-%ds" % lens[i])
pattern = " | ".join(formats)
hpattern = " | ".join(hformats)
separator = "-+-".join(['-' * n for n in lens])
s = ""
s += separator + '\n'
s += (hpattern % tuple(headers)) + '\n'
s += separator + '\n'
def f(t):
return "{}".format(t) if isinstance(t, basestring) else t
for line in rows:
s += pattern % tuple(f(t) for t in line) + '\n'
s += separator + '\n'
return s | def pprinttable(rows):
if not rows:
return
if hasattr(rows[0], '_fields'): # if namedtuple
headers = rows[0]._fields
else:
headers = ["col{}".format(i) for i in range(len(rows[0]))]
lens = [len(s) for s in headers]
for row in rows:
for i in range(len(rows[0])):
slen = len("{}".format(row[i]))
if slen > lens[i]:
lens[i] = slen
formats = []
hformats = []
for i in range(len(rows[0])):
if isinstance(rows[0][i], int):
formats.append("%%%dd" % lens[i])
else:
formats.append("%%-%ds" % lens[i])
hformats.append("%%-%ds" % lens[i])
pattern = " | ".join(formats)
hpattern = " | ".join(hformats)
separator = "-+-".join(['-' * n for n in lens])
s = ""
s += separator + '\n'
s += (hpattern % tuple(headers)) + '\n'
s += separator + '\n'
def f(t):
return "{}".format(t) if isinstance(t, basestring) else t
for line in rows:
s += pattern % tuple(f(t) for t in line) + '\n'
s += separator + '\n'
return s | Returns a pretty ASCII table from tuples.
If namedtuples are used, the table will have headers. |
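A usage example of pprinttable with namedtuples, so the field names become the table headers; the Row fields and values are arbitrary.

from collections import namedtuple

Row = namedtuple('Row', ['dag_id', 'runs'])
print(pprinttable([Row('example_dag', 3), Row('tutorial', 12)]))
# Prints an ASCII table with 'dag_id' and 'runs' as headers, integer columns right-aligned.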
def render_log_filename(ti, try_number, filename_template):
"""
Given task instance, try_number, filename_template, return the rendered log
filename
:param ti: task instance
:param try_number: try_number of the task
:param filename_template: filename template, which can be a jinja template or
a python string template
"""
filename_template, filename_jinja_template = parse_template_string(filename_template)
if filename_jinja_template:
jinja_context = ti.get_template_context()
jinja_context['try_number'] = try_number
return filename_jinja_template.render(**jinja_context)
return filename_template.format(dag_id=ti.dag_id,
task_id=ti.task_id,
execution_date=ti.execution_date.isoformat(),
try_number=try_number) | def render_log_filename(ti, try_number, filename_template):
filename_template, filename_jinja_template = parse_template_string(filename_template)
if filename_jinja_template:
jinja_context = ti.get_template_context()
jinja_context['try_number'] = try_number
return filename_jinja_template.render(**jinja_context)
return filename_template.format(dag_id=ti.dag_id,
task_id=ti.task_id,
execution_date=ti.execution_date.isoformat(),
try_number=try_number) | Given task instance, try_number, filename_template, return the rendered log
filename
:param ti: task instance
:param try_number: try_number of the task
:param filename_template: filename template, which can be a jinja template or
a python string template |
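A brief sketch of the two template styles render_log_filename accepts; ti is assumed to be a task instance taken from the Airflow context, and both templates are illustrative.

# str.format template: keys match the .format() call in the code above.
fmt_template = '{dag_id}/{task_id}/{execution_date}/{try_number}.log'

# Jinja template: detected by parse_template_string and rendered with the TI context.
jinja_template = '{{ ti.dag_id }}/{{ ti.task_id }}/{{ ts }}/{{ try_number }}.log'

log_path = render_log_filename(ti, try_number=1, filename_template=fmt_template)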
def wait(self, operation):
"""Awaits for Google Cloud Dataproc Operation to complete."""
submitted = _DataProcOperation(self.get_conn(), operation,
self.num_retries)
submitted.wait_for_done() | def wait(self, operation):
submitted = _DataProcOperation(self.get_conn(), operation,
self.num_retries)
submitted.wait_for_done() | Waits for a Google Cloud Dataproc Operation to complete. |
def _deep_string_coerce(content, json_path='json'):
"""
Coerces content, or all values of content if it is a dict, to a string. The
function will throw if content contains non-string or non-numeric types.
This function exists because the ``self.json`` field must be a
dict with only string values, since ``render_template`` will fail
for numerical values.
"""
c = _deep_string_coerce
if isinstance(content, six.string_types):
return content
elif isinstance(content, six.integer_types + (float,)):
# Databricks can tolerate either numeric or string types in the API backend.
return str(content)
elif isinstance(content, (list, tuple)):
return [c(e, '{0}[{1}]'.format(json_path, i)) for i, e in enumerate(content)]
elif isinstance(content, dict):
return {k: c(v, '{0}[{1}]'.format(json_path, k))
for k, v in list(content.items())}
else:
param_type = type(content)
msg = 'Type {0} used for parameter {1} is not a number or a string' \
.format(param_type, json_path)
raise AirflowException(msg) | def _deep_string_coerce(content, json_path='json'):
c = _deep_string_coerce
if isinstance(content, six.string_types):
return content
elif isinstance(content, six.integer_types + (float,)):
# Databricks can tolerate either numeric or string types in the API backend.
return str(content)
elif isinstance(content, (list, tuple)):
return [c(e, '{0}[{1}]'.format(json_path, i)) for i, e in enumerate(content)]
elif isinstance(content, dict):
return {k: c(v, '{0}[{1}]'.format(json_path, k))
for k, v in list(content.items())}
else:
param_type = type(content)
msg = 'Type {0} used for parameter {1} is not a number or a string' \
.format(param_type, json_path)
raise AirflowException(msg) | Coerces content, or all values of content if it is a dict, to a string. The
function will throw if content contains non-string or non-numeric types.
This function exists because the ``self.json`` field must be a
dict with only string values, since ``render_template`` will fail
for numerical values. |
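A small example of what _deep_string_coerce produces for a nested Databricks payload; the keys are illustrative.

_deep_string_coerce({'notebook_task': {'base_parameters': {'retries': 3, 'rate': 0.5}}, 'timeout_seconds': 3600})
# -> {'notebook_task': {'base_parameters': {'retries': '3', 'rate': '0.5'}}, 'timeout_seconds': '3600'}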
def _handle_databricks_operator_execution(operator, hook, log, context):
"""
Handles the Airflow + Databricks lifecycle logic for a Databricks operator
:param operator: Databricks operator being handled
:param context: Airflow context
"""
if operator.do_xcom_push:
context['ti'].xcom_push(key=XCOM_RUN_ID_KEY, value=operator.run_id)
log.info('Run submitted with run_id: %s', operator.run_id)
run_page_url = hook.get_run_page_url(operator.run_id)
if operator.do_xcom_push:
context['ti'].xcom_push(key=XCOM_RUN_PAGE_URL_KEY, value=run_page_url)
log.info('View run status, Spark UI, and logs at %s', run_page_url)
while True:
run_state = hook.get_run_state(operator.run_id)
if run_state.is_terminal:
if run_state.is_successful:
log.info('%s completed successfully.', operator.task_id)
log.info('View run status, Spark UI, and logs at %s', run_page_url)
return
else:
error_message = '{t} failed with terminal state: {s}'.format(
t=operator.task_id,
s=run_state)
raise AirflowException(error_message)
else:
log.info('%s in run state: %s', operator.task_id, run_state)
log.info('View run status, Spark UI, and logs at %s', run_page_url)
log.info('Sleeping for %s seconds.', operator.polling_period_seconds)
time.sleep(operator.polling_period_seconds) | def _handle_databricks_operator_execution(operator, hook, log, context):
if operator.do_xcom_push:
context['ti'].xcom_push(key=XCOM_RUN_ID_KEY, value=operator.run_id)
log.info('Run submitted with run_id: %s', operator.run_id)
run_page_url = hook.get_run_page_url(operator.run_id)
if operator.do_xcom_push:
context['ti'].xcom_push(key=XCOM_RUN_PAGE_URL_KEY, value=run_page_url)
log.info('View run status, Spark UI, and logs at %s', run_page_url)
while True:
run_state = hook.get_run_state(operator.run_id)
if run_state.is_terminal:
if run_state.is_successful:
log.info('%s completed successfully.', operator.task_id)
log.info('View run status, Spark UI, and logs at %s', run_page_url)
return
else:
error_message = '{t} failed with terminal state: {s}'.format(
t=operator.task_id,
s=run_state)
raise AirflowException(error_message)
else:
log.info('%s in run state: %s', operator.task_id, run_state)
log.info('View run status, Spark UI, and logs at %s', run_page_url)
log.info('Sleeping for %s seconds.', operator.polling_period_seconds)
time.sleep(operator.polling_period_seconds) | Handles the Airflow + Databricks lifecycle logic for a Databricks operator
:param operator: Databricks operator being handled
:param context: Airflow context |
def run_cli(self, pig, verbose=True):
"""
Run a pig script using the pig cli
>>> ph = PigCliHook()
>>> result = ph.run_cli("ls /;")
>>> ("hdfs://" in result)
True
"""
with TemporaryDirectory(prefix='airflow_pigop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir) as f:
f.write(pig.encode('utf-8'))
f.flush()
fname = f.name
pig_bin = 'pig'
cmd_extra = []
pig_cmd = [pig_bin, '-f', fname] + cmd_extra
if self.pig_properties:
pig_properties_list = self.pig_properties.split()
pig_cmd.extend(pig_properties_list)
if verbose:
self.log.info("%s", " ".join(pig_cmd))
sp = subprocess.Popen(
pig_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=tmp_dir,
close_fds=True)
self.sp = sp
stdout = ''
for line in iter(sp.stdout.readline, b''):
stdout += line.decode('utf-8')
if verbose:
self.log.info(line.strip())
sp.wait()
if sp.returncode:
raise AirflowException(stdout)
return stdout | def run_cli(self, pig, verbose=True):
with TemporaryDirectory(prefix='airflow_pigop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir) as f:
f.write(pig.encode('utf-8'))
f.flush()
fname = f.name
pig_bin = 'pig'
cmd_extra = []
pig_cmd = [pig_bin, '-f', fname] + cmd_extra
if self.pig_properties:
pig_properties_list = self.pig_properties.split()
pig_cmd.extend(pig_properties_list)
if verbose:
self.log.info("%s", " ".join(pig_cmd))
sp = subprocess.Popen(
pig_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=tmp_dir,
close_fds=True)
self.sp = sp
stdout = ''
for line in iter(sp.stdout.readline, b''):
stdout += line.decode('utf-8')
if verbose:
self.log.info(line.strip())
sp.wait()
if sp.returncode:
raise AirflowException(stdout)
return stdout | Run a pig script using the pig cli
>>> ph = PigCliHook()
>>> result = ph.run_cli("ls /;")
>>> ("hdfs://" in result)
True |
def fetch_celery_task_state(celery_task):
"""
Fetch and return the state of the given celery task. The scope of this function is
global so that it can be called by subprocesses in the pool.
:param celery_task: a tuple of the Celery task key and the async Celery object used
to fetch the task's state
:type celery_task: tuple(str, celery.result.AsyncResult)
:return: a tuple of the Celery task key and the Celery state of the task
:rtype: tuple[str, str]
"""
try:
with timeout(seconds=2):
# Accessing state property of celery task will make actual network request
# to get the current state of the task.
res = (celery_task[0], celery_task[1].state)
except Exception as e:
exception_traceback = "Celery Task ID: {}\n{}".format(celery_task[0],
traceback.format_exc())
res = ExceptionWithTraceback(e, exception_traceback)
return res | def fetch_celery_task_state(celery_task):
try:
with timeout(seconds=2):
# Accessing state property of celery task will make actual network request
# to get the current state of the task.
res = (celery_task[0], celery_task[1].state)
except Exception as e:
exception_traceback = "Celery Task ID: {}\n{}".format(celery_task[0],
traceback.format_exc())
res = ExceptionWithTraceback(e, exception_traceback)
return res | Fetch and return the state of the given celery task. The scope of this function is
global so that it can be called by subprocesses in the pool.
:param celery_task: a tuple of the Celery task key and the async Celery object used
to fetch the task's state
:type celery_task: tuple(str, celery.result.AsyncResult)
:return: a tuple of the Celery task key and the Celery state of the task
:rtype: tuple[str, str] |
def _num_tasks_per_send_process(self, to_send_count):
"""
How many Celery tasks should each worker process send.
:return: Number of tasks that should be sent per process
:rtype: int
"""
return max(1,
int(math.ceil(1.0 * to_send_count / self._sync_parallelism))) | def _num_tasks_per_send_process(self, to_send_count):
return max(1,
int(math.ceil(1.0 * to_send_count / self._sync_parallelism))) | How many Celery tasks should each worker process send.
:return: Number of tasks that should be sent per process
:rtype: int |
def _num_tasks_per_fetch_process(self):
"""
How many Celery task states should be fetched by each worker process.
:return: Number of task states that should be fetched per process
:rtype: int
"""
return max(1,
int(math.ceil(1.0 * len(self.tasks) / self._sync_parallelism))) | def _num_tasks_per_fetch_process(self):
return max(1,
int(math.ceil(1.0 * len(self.tasks) / self._sync_parallelism))) | How many Celery task states should be fetched by each worker process.
:return: Number of task states that should be fetched per process
:rtype: int |
def setdefault(cls, key, default, deserialize_json=False):
"""
Like a Python builtin dict object, setdefault returns the current value
for a key, and if it isn't there, stores the default value and returns it.
:param key: Dict key for this Variable
:type key: str
:param default: Default value to set and return if the variable
isn't already in the DB
:type default: Mixed
:param deserialize_json: Store this as a JSON encoded value in the DB
and un-encode it when retrieving a value
:return: Mixed
"""
obj = Variable.get(key, default_var=None,
deserialize_json=deserialize_json)
if obj is None:
if default is not None:
Variable.set(key, default, serialize_json=deserialize_json)
return default
else:
raise ValueError('Default Value must be set')
else:
return obj | def setdefault(cls, key, default, deserialize_json=False):
obj = Variable.get(key, default_var=None,
deserialize_json=deserialize_json)
if obj is None:
if default is not None:
Variable.set(key, default, serialize_json=deserialize_json)
return default
else:
raise ValueError('Default Value must be set')
else:
return obj | Like a Python builtin dict object, setdefault returns the current value
for a key, and if it isn't there, stores the default value and returns it.
:param key: Dict key for this Variable
:type key: str
:param default: Default value to set and return if the variable
isn't already in the DB
:type default: Mixed
:param deserialize_json: Store this as a JSON encoded value in the DB
and un-encode it when retrieving a value
:return: Mixed |
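A short usage sketch of Variable.setdefault; the key and default value are placeholders.

config = Variable.setdefault('etl_config', {'batch_size': 500}, deserialize_json=True)
# First call stores {"batch_size": 500} as JSON and returns the dict;
# later calls return whatever is already stored under 'etl_config'.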
def create_job(self, project_id, job, use_existing_job_fn=None):
"""
Launches an MLEngine job and waits for it to reach a terminal state.
:param project_id: The Google Cloud project id within which MLEngine
job will be launched.
:type project_id: str
:param job: MLEngine Job object that should be provided to the MLEngine
API, such as: ::
{
'jobId': 'my_job_id',
'trainingInput': {
'scaleTier': 'STANDARD_1',
...
}
}
:type job: dict
:param use_existing_job_fn: In case an MLEngine job with the same
job_id already exists, this method (if provided) will decide whether
we should use this existing job, continue waiting for it to finish
and return the job object. It should accept an MLEngine job
object, and return a boolean value indicating whether it is OK to
reuse the existing job. If 'use_existing_job_fn' is not provided,
we reuse the existing MLEngine job by default.
:type use_existing_job_fn: function
:return: The MLEngine job object if the job successfully reaches a
terminal state (which might be a FAILED or CANCELLED state).
:rtype: dict
"""
request = self._mlengine.projects().jobs().create(
parent='projects/{}'.format(project_id),
body=job)
job_id = job['jobId']
try:
request.execute()
except HttpError as e:
# 409 means there is an existing job with the same job ID.
if e.resp.status == 409:
if use_existing_job_fn is not None:
existing_job = self._get_job(project_id, job_id)
if not use_existing_job_fn(existing_job):
self.log.error(
'Job with job_id %s already exists, but it does '
'not match our expectation: %s',
job_id, existing_job
)
raise
self.log.info(
'Job with job_id %s already exists. Will wait for it to finish',
job_id
)
else:
self.log.error('Failed to create MLEngine job: {}'.format(e))
raise
return self._wait_for_job_done(project_id, job_id) | def create_job(self, project_id, job, use_existing_job_fn=None):
request = self._mlengine.projects().jobs().create(
parent='projects/{}'.format(project_id),
body=job)
job_id = job['jobId']
try:
request.execute()
except HttpError as e:
# 409 means there is an existing job with the same job ID.
if e.resp.status == 409:
if use_existing_job_fn is not None:
existing_job = self._get_job(project_id, job_id)
if not use_existing_job_fn(existing_job):
self.log.error(
'Job with job_id %s already exists, but it does '
'not match our expectation: %s',
job_id, existing_job
)
raise
self.log.info(
'Job with job_id %s already exists. Will wait for it to finish',
job_id
)
else:
self.log.error('Failed to create MLEngine job: {}'.format(e))
raise
return self._wait_for_job_done(project_id, job_id) | Launches an MLEngine job and waits for it to reach a terminal state.
:param project_id: The Google Cloud project id within which MLEngine
job will be launched.
:type project_id: str
:param job: MLEngine Job object that should be provided to the MLEngine
API, such as: ::
{
'jobId': 'my_job_id',
'trainingInput': {
'scaleTier': 'STANDARD_1',
...
}
}
:type job: dict
:param use_existing_job_fn: In case an MLEngine job with the same
job_id already exists, this method (if provided) will decide whether
we should use this existing job, continue waiting for it to finish
and return the job object. It should accept an MLEngine job
object, and return a boolean value indicating whether it is OK to
reuse the existing job. If 'use_existing_job_fn' is not provided,
we reuse the existing MLEngine job by default.
:type use_existing_job_fn: function
:return: The MLEngine job object if the job successfully reaches a
terminal state (which might be a FAILED or CANCELLED state).
:rtype: dict |
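A hedged sketch of create_job with a reuse check, assuming an MLEngine hook instance named mlengine_hook; the project id and job spec are placeholders. The callback accepts the existing job only when its trainingInput matches what we intended to submit, following the contract described above.

job_spec = {
    'jobId': 'train_model_2019_01_01',
    'trainingInput': {'scaleTier': 'STANDARD_1', 'packageUris': ['gs://my-bucket/trainer.tar.gz']},
}

def use_existing_job_fn(existing_job):
    # Reuse only if the existing job was submitted with the same training input.
    return existing_job.get('trainingInput') == job_spec['trainingInput']

finished_job = mlengine_hook.create_job('my-gcp-project', job_spec, use_existing_job_fn)
# finished_job['state'] will be SUCCEEDED, FAILED or CANCELLED.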
def _get_job(self, project_id, job_id):
"""
Gets an MLEngine job based on the job name.
:return: MLEngine job object if successful.
:rtype: dict
Raises:
googleapiclient.errors.HttpError: if HTTP error is returned from server
"""
job_name = 'projects/{}/jobs/{}'.format(project_id, job_id)
request = self._mlengine.projects().jobs().get(name=job_name)
while True:
try:
return request.execute()
except HttpError as e:
if e.resp.status == 429:
# polling after 30 seconds when quota failure occurs
time.sleep(30)
else:
self.log.error('Failed to get MLEngine job: {}'.format(e))
raise | def _get_job(self, project_id, job_id):
job_name = 'projects/{}/jobs/{}'.format(project_id, job_id)
request = self._mlengine.projects().jobs().get(name=job_name)
while True:
try:
return request.execute()
except HttpError as e:
if e.resp.status == 429:
# polling after 30 seconds when quota failure occurs
time.sleep(30)
else:
self.log.error('Failed to get MLEngine job: {}'.format(e))
raise | Gets an MLEngine job based on the job name.
:return: MLEngine job object if successful.
:rtype: dict
Raises:
googleapiclient.errors.HttpError: if HTTP error is returned from server |
def _wait_for_job_done(self, project_id, job_id, interval=30):
"""
Waits for the Job to reach a terminal state.
This method will periodically check the job state until the job reaches
a terminal state.
Raises:
googleapiclient.errors.HttpError: if HTTP error is returned when getting
the job
"""
if interval <= 0:
raise ValueError("Interval must be > 0")
while True:
job = self._get_job(project_id, job_id)
if job['state'] in ['SUCCEEDED', 'FAILED', 'CANCELLED']:
return job
time.sleep(interval) | def _wait_for_job_done(self, project_id, job_id, interval=30):
if interval <= 0:
raise ValueError("Interval must be > 0")
while True:
job = self._get_job(project_id, job_id)
if job['state'] in ['SUCCEEDED', 'FAILED', 'CANCELLED']:
return job
time.sleep(interval) | Waits for the Job to reach a terminal state.
This method will periodically check the job state until the job reaches
a terminal state.
Raises:
googleapiclient.errors.HttpError: if HTTP error is returned when getting
the job |
def create_version(self, project_id, model_name, version_spec):
"""
Creates the Version on Google Cloud ML Engine.
Returns the operation if the version was created successfully and
raises an error otherwise.
"""
parent_name = 'projects/{}/models/{}'.format(project_id, model_name)
create_request = self._mlengine.projects().models().versions().create(
parent=parent_name, body=version_spec)
response = create_request.execute()
get_request = self._mlengine.projects().operations().get(
name=response['name'])
return _poll_with_exponential_delay(
request=get_request,
max_n=9,
is_done_func=lambda resp: resp.get('done', False),
is_error_func=lambda resp: resp.get('error', None) is not None) | def create_version(self, project_id, model_name, version_spec):
parent_name = 'projects/{}/models/{}'.format(project_id, model_name)
create_request = self._mlengine.projects().models().versions().create(
parent=parent_name, body=version_spec)
response = create_request.execute()
get_request = self._mlengine.projects().operations().get(
name=response['name'])
return _poll_with_exponential_delay(
request=get_request,
max_n=9,
is_done_func=lambda resp: resp.get('done', False),
is_error_func=lambda resp: resp.get('error', None) is not None) | Creates the Version on Google Cloud ML Engine.
Returns the operation if the version was created successfully and
raises an error otherwise. |
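
The _poll_with_exponential_delay helper used by create_version and delete_version is not included in this excerpt. The following is a hypothetical stand-in sketching the polling contract implied by the call sites (execute the request, stop when is_done_func returns True, raise when is_error_func flags an error, back off exponentially for up to max_n attempts); it is not the actual Airflow implementation.

import time

def poll_with_exponential_delay(request, max_n, is_done_func, is_error_func):
    # Hypothetical sketch of the polling contract implied above.
    for i in range(1, max_n + 1):
        response = request.execute()
        if is_error_func(response):
            raise RuntimeError('Operation returned an error: {}'.format(response))
        if is_done_func(response):
            return response
        time.sleep(2 ** i)  # back off: 2, 4, 8, ... seconds between attempts
    raise RuntimeError('Operation did not finish within {} attempts'.format(max_n))
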
def set_default_version(self, project_id, model_name, version_name):
"""
Sets a version to be the default. Blocks until finished.
"""
full_version_name = 'projects/{}/models/{}/versions/{}'.format(
project_id, model_name, version_name)
request = self._mlengine.projects().models().versions().setDefault(
name=full_version_name, body={})
try:
response = request.execute()
self.log.info('Successfully set version: %s to default', response)
return response
except HttpError as e:
self.log.error('Something went wrong: %s', e)
raise | def set_default_version(self, project_id, model_name, version_name):
full_version_name = 'projects/{}/models/{}/versions/{}'.format(
project_id, model_name, version_name)
request = self._mlengine.projects().models().versions().setDefault(
name=full_version_name, body={})
try:
response = request.execute()
self.log.info('Successfully set version: %s to default', response)
return response
except HttpError as e:
self.log.error('Something went wrong: %s', e)
raise | Sets a version to be the default. Blocks until finished. |
def list_versions(self, project_id, model_name):
"""
Lists all available versions of a model. Blocks until finished.
"""
result = []
full_parent_name = 'projects/{}/models/{}'.format(
project_id, model_name)
request = self._mlengine.projects().models().versions().list(
parent=full_parent_name, pageSize=100)
response = request.execute()
next_page_token = response.get('nextPageToken', None)
result.extend(response.get('versions', []))
while next_page_token is not None:
next_request = self._mlengine.projects().models().versions().list(
parent=full_parent_name,
pageToken=next_page_token,
pageSize=100)
response = next_request.execute()
next_page_token = response.get('nextPageToken', None)
result.extend(response.get('versions', []))
time.sleep(5)
return result | def list_versions(self, project_id, model_name):
result = []
full_parent_name = 'projects/{}/models/{}'.format(
project_id, model_name)
request = self._mlengine.projects().models().versions().list(
parent=full_parent_name, pageSize=100)
response = request.execute()
next_page_token = response.get('nextPageToken', None)
result.extend(response.get('versions', []))
while next_page_token is not None:
next_request = self._mlengine.projects().models().versions().list(
parent=full_parent_name,
pageToken=next_page_token,
pageSize=100)
response = next_request.execute()
next_page_token = response.get('nextPageToken', None)
result.extend(response.get('versions', []))
time.sleep(5)
return result | Lists all available versions of a model. Blocks until finished. |
def delete_version(self, project_id, model_name, version_name):
"""
Deletes the given version of a model. Blocks until finished.
"""
full_name = 'projects/{}/models/{}/versions/{}'.format(
project_id, model_name, version_name)
delete_request = self._mlengine.projects().models().versions().delete(
name=full_name)
response = delete_request.execute()
get_request = self._mlengine.projects().operations().get(
name=response['name'])
return _poll_with_exponential_delay(
request=get_request,
max_n=9,
is_done_func=lambda resp: resp.get('done', False),
is_error_func=lambda resp: resp.get('error', None) is not None) | def delete_version(self, project_id, model_name, version_name):
full_name = 'projects/{}/models/{}/versions/{}'.format(
project_id, model_name, version_name)
delete_request = self._mlengine.projects().models().versions().delete(
name=full_name)
response = delete_request.execute()
get_request = self._mlengine.projects().operations().get(
name=response['name'])
return _poll_with_exponential_delay(
request=get_request,
max_n=9,
is_done_func=lambda resp: resp.get('done', False),
is_error_func=lambda resp: resp.get('error', None) is not None) | Deletes the given version of a model. Blocks until finished. |
def write_batch_data(self, items):
"""
        Write batch items to dynamodb table with provisioned throughput capacity.
"""
dynamodb_conn = self.get_conn()
try:
table = dynamodb_conn.Table(self.table_name)
with table.batch_writer(overwrite_by_pkeys=self.table_keys) as batch:
for item in items:
batch.put_item(Item=item)
return True
except Exception as general_error:
raise AirflowException(
'Failed to insert items in dynamodb, error: {error}'.format(
error=str(general_error)
)
) | def write_batch_data(self, items):
dynamodb_conn = self.get_conn()
try:
table = dynamodb_conn.Table(self.table_name)
with table.batch_writer(overwrite_by_pkeys=self.table_keys) as batch:
for item in items:
batch.put_item(Item=item)
return True
except Exception as general_error:
raise AirflowException(
'Failed to insert items in dynamodb, error: {error}'.format(
error=str(general_error)
)
            ) | Write batch items to dynamodb table with provisioned throughput capacity. |
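
write_batch_data above is a thin wrapper around boto3's DynamoDB batch_writer. A minimal standalone sketch of the same call pattern, assuming a placeholder table named 'example_table' with hash key 'pk' and a default AWS credential chain:

import boto3

# Region, table name and key schema are illustrative placeholders.
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
table = dynamodb.Table('example_table')
items = [{'pk': '1', 'value': 'a'}, {'pk': '2', 'value': 'b'}]

# overwrite_by_pkeys de-duplicates buffered items that share the same key.
with table.batch_writer(overwrite_by_pkeys=['pk']) as batch:
    for item in items:
        batch.put_item(Item=item)
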
def get_default_executor():
"""Creates a new instance of the configured executor if none exists and returns it"""
global DEFAULT_EXECUTOR
if DEFAULT_EXECUTOR is not None:
return DEFAULT_EXECUTOR
executor_name = configuration.conf.get('core', 'EXECUTOR')
DEFAULT_EXECUTOR = _get_executor(executor_name)
log = LoggingMixin().log
log.info("Using executor %s", executor_name)
return DEFAULT_EXECUTOR | def get_default_executor():
global DEFAULT_EXECUTOR
if DEFAULT_EXECUTOR is not None:
return DEFAULT_EXECUTOR
executor_name = configuration.conf.get('core', 'EXECUTOR')
DEFAULT_EXECUTOR = _get_executor(executor_name)
log = LoggingMixin().log
log.info("Using executor %s", executor_name)
return DEFAULT_EXECUTOR | Creates a new instance of the configured executor if none exists and returns it |
def _get_executor(executor_name):
"""
Creates a new instance of the named executor.
    In case the executor name is not known in airflow,
look for it in the plugins
"""
if executor_name == Executors.LocalExecutor:
return LocalExecutor()
elif executor_name == Executors.SequentialExecutor:
return SequentialExecutor()
elif executor_name == Executors.CeleryExecutor:
from airflow.executors.celery_executor import CeleryExecutor
return CeleryExecutor()
elif executor_name == Executors.DaskExecutor:
from airflow.executors.dask_executor import DaskExecutor
return DaskExecutor()
elif executor_name == Executors.KubernetesExecutor:
from airflow.contrib.executors.kubernetes_executor import KubernetesExecutor
return KubernetesExecutor()
else:
# Loading plugins
_integrate_plugins()
executor_path = executor_name.split('.')
if len(executor_path) != 2:
raise AirflowException(
"Executor {0} not supported: "
"please specify in format plugin_module.executor".format(executor_name))
if executor_path[0] in globals():
return globals()[executor_path[0]].__dict__[executor_path[1]]()
else:
raise AirflowException("Executor {0} not supported.".format(executor_name)) | def _get_executor(executor_name):
if executor_name == Executors.LocalExecutor:
return LocalExecutor()
elif executor_name == Executors.SequentialExecutor:
return SequentialExecutor()
elif executor_name == Executors.CeleryExecutor:
from airflow.executors.celery_executor import CeleryExecutor
return CeleryExecutor()
elif executor_name == Executors.DaskExecutor:
from airflow.executors.dask_executor import DaskExecutor
return DaskExecutor()
elif executor_name == Executors.KubernetesExecutor:
from airflow.contrib.executors.kubernetes_executor import KubernetesExecutor
return KubernetesExecutor()
else:
# Loading plugins
_integrate_plugins()
executor_path = executor_name.split('.')
if len(executor_path) != 2:
raise AirflowException(
"Executor {0} not supported: "
"please specify in format plugin_module.executor".format(executor_name))
if executor_path[0] in globals():
return globals()[executor_path[0]].__dict__[executor_path[1]]()
else:
raise AirflowException("Executor {0} not supported.".format(executor_name)) | Creates a new instance of the named executor.
In case the executor name is not known in airflow,
look for it in the plugins |
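
The plugin branch of _get_executor expects names of the form plugin_module.executor and resolves them with a two-step lookup (a module-level name, then an attribute on it). A toy, self-contained illustration of that lookup with invented names; no Airflow plugin machinery is involved:

# Invented names; this only mimics the two-part lookup shown above.
class my_plugin_module:
    class MyExecutor:
        def __repr__(self):
            return 'MyExecutor()'

namespace = {'my_plugin_module': my_plugin_module}

def resolve_executor(executor_name):
    module_name, class_name = executor_name.split('.')
    return namespace[module_name].__dict__[class_name]()

print(resolve_executor('my_plugin_module.MyExecutor'))  # -> MyExecutor()
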
def on_error(self, error, items):
"""
Handles error callbacks when using Segment with segment_debug_mode set to True
"""
self.log.error('Encountered Segment error: {segment_error} with '
'items: {with_items}'.format(segment_error=error,
with_items=items))
raise AirflowException('Segment error: {}'.format(error)) | def on_error(self, error, items):
self.log.error('Encountered Segment error: {segment_error} with '
'items: {with_items}'.format(segment_error=error,
with_items=items))
raise AirflowException('Segment error: {}'.format(error)) | Handles error callbacks when using Segment with segment_debug_mode set to True |
def trigger_dag(dag_id):
"""
Trigger a new dag run for a Dag with an execution date of now unless
specified in the data.
"""
data = request.get_json(force=True)
run_id = None
if 'run_id' in data:
run_id = data['run_id']
conf = None
if 'conf' in data:
conf = data['conf']
execution_date = None
if 'execution_date' in data and data['execution_date'] is not None:
execution_date = data['execution_date']
# Convert string datetime into actual datetime
try:
execution_date = timezone.parse(execution_date)
except ValueError:
error_message = (
'Given execution date, {}, could not be identified '
'as a date. Example date format: 2015-11-16T14:34:15+00:00'
.format(execution_date))
_log.info(error_message)
response = jsonify({'error': error_message})
response.status_code = 400
return response
try:
dr = trigger.trigger_dag(dag_id, run_id, conf, execution_date)
except AirflowException as err:
_log.error(err)
response = jsonify(error="{}".format(err))
response.status_code = err.status_code
return response
if getattr(g, 'user', None):
_log.info("User %s created %s", g.user, dr)
response = jsonify(message="Created {}".format(dr))
return response | def trigger_dag(dag_id):
data = request.get_json(force=True)
run_id = None
if 'run_id' in data:
run_id = data['run_id']
conf = None
if 'conf' in data:
conf = data['conf']
execution_date = None
if 'execution_date' in data and data['execution_date'] is not None:
execution_date = data['execution_date']
# Convert string datetime into actual datetime
try:
execution_date = timezone.parse(execution_date)
except ValueError:
error_message = (
'Given execution date, {}, could not be identified '
'as a date. Example date format: 2015-11-16T14:34:15+00:00'
.format(execution_date))
_log.info(error_message)
response = jsonify({'error': error_message})
response.status_code = 400
return response
try:
dr = trigger.trigger_dag(dag_id, run_id, conf, execution_date)
except AirflowException as err:
_log.error(err)
response = jsonify(error="{}".format(err))
response.status_code = err.status_code
return response
if getattr(g, 'user', None):
_log.info("User %s created %s", g.user, dr)
response = jsonify(message="Created {}".format(dr))
return response | Trigger a new dag run for a Dag with an execution date of now unless
specified in the data. |
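
A hedged client-side example of calling this handler over HTTP with requests; the host, DAG id and payload values are placeholders, and the /api/experimental/... path is assumed from Airflow's experimental REST API layout:

import json
import requests

url = 'http://localhost:8080/api/experimental/dags/example_dag/dag_runs'
payload = {
    'run_id': 'manual__2015-11-16T14:34:15+00:00',
    'conf': {'message': 'hello'},
    'execution_date': '2015-11-16T14:34:15+00:00',
}
resp = requests.post(url, data=json.dumps(payload),
                     headers={'Content-Type': 'application/json'})
print(resp.status_code, resp.json())
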
def delete_dag(dag_id):
"""
Delete all DB records related to the specified Dag.
"""
try:
count = delete.delete_dag(dag_id)
except AirflowException as err:
_log.error(err)
response = jsonify(error="{}".format(err))
response.status_code = err.status_code
return response
return jsonify(message="Removed {} record(s)".format(count), count=count) | def delete_dag(dag_id):
try:
count = delete.delete_dag(dag_id)
except AirflowException as err:
_log.error(err)
response = jsonify(error="{}".format(err))
response.status_code = err.status_code
return response
return jsonify(message="Removed {} record(s)".format(count), count=count) | Delete all DB records related to the specified Dag. |
def task_info(dag_id, task_id):
"""Returns a JSON with a task's public instance variables. """
try:
info = get_task(dag_id, task_id)
except AirflowException as err:
_log.info(err)
response = jsonify(error="{}".format(err))
response.status_code = err.status_code
return response
# JSONify and return.
fields = {k: str(v)
for k, v in vars(info).items()
if not k.startswith('_')}
return jsonify(fields) | def task_info(dag_id, task_id):
try:
info = get_task(dag_id, task_id)
except AirflowException as err:
_log.info(err)
response = jsonify(error="{}".format(err))
response.status_code = err.status_code
return response
# JSONify and return.
fields = {k: str(v)
for k, v in vars(info).items()
if not k.startswith('_')}
return jsonify(fields) | Returns a JSON with a task's public instance variables. |
def create_or_update(self, resource_group, name, container_group):
"""
Create a new container group
:param resource_group: the name of the resource group
:type resource_group: str
:param name: the name of the container group
:type name: str
:param container_group: the properties of the container group
:type container_group: azure.mgmt.containerinstance.models.ContainerGroup
"""
self.connection.container_groups.create_or_update(resource_group,
name,
container_group) | def create_or_update(self, resource_group, name, container_group):
self.connection.container_groups.create_or_update(resource_group,
name,
container_group) | Create a new container group
:param resource_group: the name of the resource group
:type resource_group: str
:param name: the name of the container group
:type name: str
:param container_group: the properties of the container group
:type container_group: azure.mgmt.containerinstance.models.ContainerGroup |
def get_state_exitcode_details(self, resource_group, name):
"""
Get the state and exitcode of a container group
:param resource_group: the name of the resource group
:type resource_group: str
:param name: the name of the container group
:type name: str
:return: A tuple with the state, exitcode, and details.
        If the exitcode is unknown, 0 is returned.
:rtype: tuple(state,exitcode,details)
"""
current_state = self._get_instance_view(resource_group, name).current_state
return (current_state.state,
current_state.exit_code,
current_state.detail_status) | def get_state_exitcode_details(self, resource_group, name):
current_state = self._get_instance_view(resource_group, name).current_state
return (current_state.state,
current_state.exit_code,
current_state.detail_status) | Get the state and exitcode of a container group
:param resource_group: the name of the resource group
:type resource_group: str
:param name: the name of the container group
:type name: str
:return: A tuple with the state, exitcode, and details.
If the exitcode is unknown, 0 is returned.
:rtype: tuple(state,exitcode,details) |
def get_messages(self, resource_group, name):
"""
Get the messages of a container group
:param resource_group: the name of the resource group
:type resource_group: str
:param name: the name of the container group
:type name: str
:return: A list of the event messages
:rtype: list[str]
"""
instance_view = self._get_instance_view(resource_group, name)
return [event.message for event in instance_view.events] | def get_messages(self, resource_group, name):
instance_view = self._get_instance_view(resource_group, name)
return [event.message for event in instance_view.events] | Get the messages of a container group
:param resource_group: the name of the resource group
:type resource_group: str
:param name: the name of the container group
:type name: str
:return: A list of the event messages
:rtype: list[str] |
def get_logs(self, resource_group, name, tail=1000):
"""
        Get the tail of the logs of a container group
:param resource_group: the name of the resource group
:type resource_group: str
:param name: the name of the container group
:type name: str
:param tail: the size of the tail
:type tail: int
:return: A list of log messages
:rtype: list[str]
"""
logs = self.connection.container.list_logs(resource_group, name, name, tail=tail)
return logs.content.splitlines(True) | def get_logs(self, resource_group, name, tail=1000):
logs = self.connection.container.list_logs(resource_group, name, name, tail=tail)
        return logs.content.splitlines(True) | Get the tail of the logs of a container group
:param resource_group: the name of the resource group
:type resource_group: str
:param name: the name of the container group
:type name: str
:param tail: the size of the tail
:type tail: int
:return: A list of log messages
:rtype: list[str] |
def delete(self, resource_group, name):
"""
Delete a container group
:param resource_group: the name of the resource group
:type resource_group: str
:param name: the name of the container group
:type name: str
"""
self.connection.container_groups.delete(resource_group, name) | def delete(self, resource_group, name):
self.connection.container_groups.delete(resource_group, name) | Delete a container group
:param resource_group: the name of the resource group
:type resource_group: str
:param name: the name of the container group
:type name: str |
def exists(self, resource_group, name):
"""
Test if a container group exists
:param resource_group: the name of the resource group
:type resource_group: str
:param name: the name of the container group
:type name: str
"""
for container in self.connection.container_groups.list_by_resource_group(resource_group):
if container.name == name:
return True
return False | def exists(self, resource_group, name):
for container in self.connection.container_groups.list_by_resource_group(resource_group):
if container.name == name:
return True
return False | Test if a container group exists
:param resource_group: the name of the resource group
:type resource_group: str
:param name: the name of the container group
:type name: str |
def apply_defaults(func):
"""
    Function decorator that looks for an argument named "default_args", and
fills the unspecified arguments from it.
Since python2.* isn't clear about which arguments are missing when
    calling a function, and this can be quite confusing with multi-level
inheritance and argument defaults, this decorator also alerts with
specific information about the missing arguments.
"""
# Cache inspect.signature for the wrapper closure to avoid calling it
# at every decorated invocation. This is separate sig_cache created
# per decoration, i.e. each function decorated using apply_defaults will
# have a different sig_cache.
sig_cache = signature(func)
non_optional_args = {
name for (name, param) in sig_cache.parameters.items()
if param.default == param.empty and
param.name != 'self' and
param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)}
@wraps(func)
def wrapper(*args, **kwargs):
if len(args) > 1:
raise AirflowException(
"Use keyword arguments when initializing operators")
dag_args = {}
dag_params = {}
dag = kwargs.get('dag', None) or settings.CONTEXT_MANAGER_DAG
if dag:
dag_args = copy(dag.default_args) or {}
dag_params = copy(dag.params) or {}
params = {}
if 'params' in kwargs:
params = kwargs['params']
dag_params.update(params)
default_args = {}
if 'default_args' in kwargs:
default_args = kwargs['default_args']
if 'params' in default_args:
dag_params.update(default_args['params'])
del default_args['params']
dag_args.update(default_args)
default_args = dag_args
for arg in sig_cache.parameters:
if arg not in kwargs and arg in default_args:
kwargs[arg] = default_args[arg]
missing_args = list(non_optional_args - set(kwargs))
if missing_args:
msg = "Argument {0} is required".format(missing_args)
raise AirflowException(msg)
kwargs['params'] = dag_params
result = func(*args, **kwargs)
return result
return wrapper | def apply_defaults(func):
# Cache inspect.signature for the wrapper closure to avoid calling it
# at every decorated invocation. This is separate sig_cache created
# per decoration, i.e. each function decorated using apply_defaults will
# have a different sig_cache.
sig_cache = signature(func)
non_optional_args = {
name for (name, param) in sig_cache.parameters.items()
if param.default == param.empty and
param.name != 'self' and
param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)}
@wraps(func)
def wrapper(*args, **kwargs):
if len(args) > 1:
raise AirflowException(
"Use keyword arguments when initializing operators")
dag_args = {}
dag_params = {}
dag = kwargs.get('dag', None) or settings.CONTEXT_MANAGER_DAG
if dag:
dag_args = copy(dag.default_args) or {}
dag_params = copy(dag.params) or {}
params = {}
if 'params' in kwargs:
params = kwargs['params']
dag_params.update(params)
default_args = {}
if 'default_args' in kwargs:
default_args = kwargs['default_args']
if 'params' in default_args:
dag_params.update(default_args['params'])
del default_args['params']
dag_args.update(default_args)
default_args = dag_args
for arg in sig_cache.parameters:
if arg not in kwargs and arg in default_args:
kwargs[arg] = default_args[arg]
missing_args = list(non_optional_args - set(kwargs))
if missing_args:
msg = "Argument {0} is required".format(missing_args)
raise AirflowException(msg)
kwargs['params'] = dag_params
result = func(*args, **kwargs)
return result
    return wrapper | Function decorator that looks for an argument named "default_args", and
fills the unspecified arguments from it.
Since python2.* isn't clear about which arguments are missing when
calling a function, and this can be quite confusing with multi-level
inheritance and argument defaults, this decorator also alerts with
specific information about the missing arguments. |
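
A hedged usage sketch of the behavior documented above: when an operator whose constructor is wrapped with apply_defaults is created inside a DAG, unspecified arguments such as owner and retries are filled in from the DAG's default_args. Assumes a 1.10-era Airflow install; the DAG and task names are placeholders.

from datetime import datetime
from airflow import DAG
from airflow.operators.bash_operator import BashOperator

default_args = {'owner': 'airflow', 'retries': 3}
dag = DAG('defaults_demo', default_args=default_args,
          start_date=datetime(2019, 1, 1), schedule_interval=None)

# 'owner' and 'retries' are not passed explicitly; apply_defaults fills them
# in from dag.default_args when BashOperator.__init__ runs.
task = BashOperator(task_id='echo_hi', bash_command='echo hi', dag=dag)
print(task.owner, task.retries)  # expected: airflow 3
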
def construct_ingest_query(self, static_path, columns):
"""
Builds an ingest query for an HDFS TSV load.
:param static_path: The path on hdfs where the data is
:type static_path: str
:param columns: List of all the columns that are available
:type columns: list
"""
# backward compatibility for num_shards,
# but target_partition_size is the default setting
# and overwrites the num_shards
num_shards = self.num_shards
target_partition_size = self.target_partition_size
if self.target_partition_size == -1:
if self.num_shards == -1:
target_partition_size = DEFAULT_TARGET_PARTITION_SIZE
else:
num_shards = -1
metric_names = [m['fieldName'] for m in self.metric_spec if m['type'] != 'count']
# Take all the columns, which are not the time dimension
# or a metric, as the dimension columns
dimensions = [c for c in columns if c not in metric_names and c != self.ts_dim]
ingest_query_dict = {
"type": "index_hadoop",
"spec": {
"dataSchema": {
"metricsSpec": self.metric_spec,
"granularitySpec": {
"queryGranularity": self.query_granularity,
"intervals": self.intervals,
"type": "uniform",
"segmentGranularity": self.segment_granularity,
},
"parser": {
"type": "string",
"parseSpec": {
"columns": columns,
"dimensionsSpec": {
"dimensionExclusions": [],
"dimensions": dimensions, # list of names
"spatialDimensions": []
},
"timestampSpec": {
"column": self.ts_dim,
"format": "auto"
},
"format": "tsv"
}
},
"dataSource": self.druid_datasource
},
"tuningConfig": {
"type": "hadoop",
"jobProperties": {
"mapreduce.job.user.classpath.first": "false",
"mapreduce.map.output.compress": "false",
"mapreduce.output.fileoutputformat.compress": "false",
},
"partitionsSpec": {
"type": "hashed",
"targetPartitionSize": target_partition_size,
"numShards": num_shards,
},
},
"ioConfig": {
"inputSpec": {
"paths": static_path,
"type": "static"
},
"type": "hadoop"
}
}
}
if self.job_properties:
ingest_query_dict['spec']['tuningConfig']['jobProperties'] \
.update(self.job_properties)
if self.hadoop_dependency_coordinates:
ingest_query_dict['hadoopDependencyCoordinates'] \
= self.hadoop_dependency_coordinates
return ingest_query_dict | def construct_ingest_query(self, static_path, columns):
# backward compatibility for num_shards,
# but target_partition_size is the default setting
# and overwrites the num_shards
num_shards = self.num_shards
target_partition_size = self.target_partition_size
if self.target_partition_size == -1:
if self.num_shards == -1:
target_partition_size = DEFAULT_TARGET_PARTITION_SIZE
else:
num_shards = -1
metric_names = [m['fieldName'] for m in self.metric_spec if m['type'] != 'count']
# Take all the columns, which are not the time dimension
# or a metric, as the dimension columns
dimensions = [c for c in columns if c not in metric_names and c != self.ts_dim]
ingest_query_dict = {
"type": "index_hadoop",
"spec": {
"dataSchema": {
"metricsSpec": self.metric_spec,
"granularitySpec": {
"queryGranularity": self.query_granularity,
"intervals": self.intervals,
"type": "uniform",
"segmentGranularity": self.segment_granularity,
},
"parser": {
"type": "string",
"parseSpec": {
"columns": columns,
"dimensionsSpec": {
"dimensionExclusions": [],
"dimensions": dimensions, # list of names
"spatialDimensions": []
},
"timestampSpec": {
"column": self.ts_dim,
"format": "auto"
},
"format": "tsv"
}
},
"dataSource": self.druid_datasource
},
"tuningConfig": {
"type": "hadoop",
"jobProperties": {
"mapreduce.job.user.classpath.first": "false",
"mapreduce.map.output.compress": "false",
"mapreduce.output.fileoutputformat.compress": "false",
},
"partitionsSpec": {
"type": "hashed",
"targetPartitionSize": target_partition_size,
"numShards": num_shards,
},
},
"ioConfig": {
"inputSpec": {
"paths": static_path,
"type": "static"
},
"type": "hadoop"
}
}
}
if self.job_properties:
ingest_query_dict['spec']['tuningConfig']['jobProperties'] \
.update(self.job_properties)
if self.hadoop_dependency_coordinates:
ingest_query_dict['hadoopDependencyCoordinates'] \
= self.hadoop_dependency_coordinates
return ingest_query_dict | Builds an ingest query for an HDFS TSV load.
:param static_path: The path on hdfs where the data is
:type static_path: str
:param columns: List of all the columns that are available
:type columns: list |
def poke(self, context):
"""
Check for message on subscribed channels and write to xcom the message with key ``message``
An example of message ``{'type': 'message', 'pattern': None, 'channel': b'test', 'data': b'hello'}``
:param context: the context object
:type context: dict
:return: ``True`` if message (with type 'message') is available or ``False`` if not
"""
self.log.info('RedisPubSubSensor checking for message on channels: %s', self.channels)
message = self.pubsub.get_message()
self.log.info('Message %s from channel %s', message, self.channels)
# Process only message types
if message and message['type'] == 'message':
context['ti'].xcom_push(key='message', value=message)
self.pubsub.unsubscribe(self.channels)
return True
return False | def poke(self, context):
self.log.info('RedisPubSubSensor checking for message on channels: %s', self.channels)
message = self.pubsub.get_message()
self.log.info('Message %s from channel %s', message, self.channels)
# Process only message types
if message and message['type'] == 'message':
context['ti'].xcom_push(key='message', value=message)
self.pubsub.unsubscribe(self.channels)
return True
return False | Check for message on subscribed channels and write to xcom the message with key ``message``
An example of message ``{'type': 'message', 'pattern': None, 'channel': b'test', 'data': b'hello'}``
:param context: the context object
:type context: dict
:return: ``True`` if message (with type 'message') is available or ``False`` if not |
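
An illustrative redis-py snippet showing the message shape the sensor checks for; it assumes a Redis server on localhost and uses a placeholder channel name:

import time
import redis

r = redis.StrictRedis(host='localhost', port=6379)
p = r.pubsub()
p.subscribe('test')
r.publish('test', 'hello')

# The first message returned is usually the 'subscribe' confirmation; keep
# polling until a 'message' type arrives, mirroring the sensor's check.
while True:
    msg = p.get_message()
    if msg and msg['type'] == 'message':
        print(msg)  # e.g. {'type': 'message', 'channel': b'test', 'data': b'hello', ...}
        break
    time.sleep(0.1)
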
def find(dag_id=None, run_id=None, execution_date=None,
state=None, external_trigger=None, no_backfills=False,
session=None):
"""
Returns a set of dag runs for the given search criteria.
:param dag_id: the dag_id to find dag runs for
:type dag_id: int, list
        :param run_id: defines the run id for this dag run
:type run_id: str
:param execution_date: the execution date
:type execution_date: datetime.datetime
:param state: the state of the dag run
:type state: airflow.utils.state.State
:param external_trigger: whether this dag run is externally triggered
:type external_trigger: bool
:param no_backfills: return no backfills (True), return all (False).
Defaults to False
:type no_backfills: bool
:param session: database session
:type session: sqlalchemy.orm.session.Session
"""
DR = DagRun
qry = session.query(DR)
if dag_id:
qry = qry.filter(DR.dag_id == dag_id)
if run_id:
qry = qry.filter(DR.run_id == run_id)
if execution_date:
if isinstance(execution_date, list):
qry = qry.filter(DR.execution_date.in_(execution_date))
else:
qry = qry.filter(DR.execution_date == execution_date)
if state:
qry = qry.filter(DR.state == state)
if external_trigger is not None:
qry = qry.filter(DR.external_trigger == external_trigger)
if no_backfills:
# in order to prevent a circular dependency
from airflow.jobs import BackfillJob
qry = qry.filter(DR.run_id.notlike(BackfillJob.ID_PREFIX + '%'))
dr = qry.order_by(DR.execution_date).all()
return dr | def find(dag_id=None, run_id=None, execution_date=None,
state=None, external_trigger=None, no_backfills=False,
session=None):
DR = DagRun
qry = session.query(DR)
if dag_id:
qry = qry.filter(DR.dag_id == dag_id)
if run_id:
qry = qry.filter(DR.run_id == run_id)
if execution_date:
if isinstance(execution_date, list):
qry = qry.filter(DR.execution_date.in_(execution_date))
else:
qry = qry.filter(DR.execution_date == execution_date)
if state:
qry = qry.filter(DR.state == state)
if external_trigger is not None:
qry = qry.filter(DR.external_trigger == external_trigger)
if no_backfills:
# in order to prevent a circular dependency
from airflow.jobs import BackfillJob
qry = qry.filter(DR.run_id.notlike(BackfillJob.ID_PREFIX + '%'))
dr = qry.order_by(DR.execution_date).all()
return dr | Returns a set of dag runs for the given search criteria.
:param dag_id: the dag_id to find dag runs for
:type dag_id: int, list
:param run_id: defines the run id for this dag run
:type run_id: str
:param execution_date: the execution date
:type execution_date: datetime.datetime
:param state: the state of the dag run
:type state: airflow.utils.state.State
:param external_trigger: whether this dag run is externally triggered
:type external_trigger: bool
:param no_backfills: return no backfills (True), return all (False).
Defaults to False
:type no_backfills: bool
:param session: database session
:type session: sqlalchemy.orm.session.Session |
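
A hedged usage sketch: in Airflow this method is decorated with provide_session, so the session argument can normally be omitted. The dag_id is a placeholder and an initialized metadata database is assumed.

from airflow.models import DagRun
from airflow.utils.state import State

running = DagRun.find(dag_id='example_dag', state=State.RUNNING)
for dr in running:
    print(dr.run_id, dr.execution_date)
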
def get_task_instance(self, task_id, session=None):
"""
Returns the task instance specified by task_id for this dag run
:param task_id: the task id
"""
from airflow.models.taskinstance import TaskInstance # Avoid circular import
TI = TaskInstance
ti = session.query(TI).filter(
TI.dag_id == self.dag_id,
TI.execution_date == self.execution_date,
TI.task_id == task_id
).first()
return ti | def get_task_instance(self, task_id, session=None):
from airflow.models.taskinstance import TaskInstance # Avoid circular import
TI = TaskInstance
ti = session.query(TI).filter(
TI.dag_id == self.dag_id,
TI.execution_date == self.execution_date,
TI.task_id == task_id
).first()
return ti | Returns the task instance specified by task_id for this dag run
:param task_id: the task id |
def update_state(self, session=None):
"""
Determines the overall state of the DagRun based on the state
of its TaskInstances.
:return: State
"""
dag = self.get_dag()
tis = self.get_task_instances(session=session)
self.log.debug("Updating state for %s considering %s task(s)", self, len(tis))
for ti in list(tis):
# skip in db?
if ti.state == State.REMOVED:
tis.remove(ti)
else:
ti.task = dag.get_task(ti.task_id)
# pre-calculate
# db is faster
start_dttm = timezone.utcnow()
unfinished_tasks = self.get_task_instances(
state=State.unfinished(),
session=session
)
none_depends_on_past = all(not t.task.depends_on_past for t in unfinished_tasks)
none_task_concurrency = all(t.task.task_concurrency is None
for t in unfinished_tasks)
# small speed up
if unfinished_tasks and none_depends_on_past and none_task_concurrency:
            # todo: this can actually get pretty slow: one task costs between 0.01-0.15s
no_dependencies_met = True
for ut in unfinished_tasks:
# We need to flag upstream and check for changes because upstream
# failures/re-schedules can result in deadlock false positives
old_state = ut.state
deps_met = ut.are_dependencies_met(
dep_context=DepContext(
flag_upstream_failed=True,
ignore_in_retry_period=True,
ignore_in_reschedule_period=True),
session=session)
if deps_met or old_state != ut.current_state(session=session):
no_dependencies_met = False
break
duration = (timezone.utcnow() - start_dttm).total_seconds() * 1000
Stats.timing("dagrun.dependency-check.{}".format(self.dag_id), duration)
root_ids = [t.task_id for t in dag.roots]
roots = [t for t in tis if t.task_id in root_ids]
# if all roots finished and at least one failed, the run failed
if (not unfinished_tasks and
any(r.state in (State.FAILED, State.UPSTREAM_FAILED) for r in roots)):
self.log.info('Marking run %s failed', self)
self.set_state(State.FAILED)
dag.handle_callback(self, success=False, reason='task_failure',
session=session)
# if all roots succeeded and no unfinished tasks, the run succeeded
elif not unfinished_tasks and all(r.state in (State.SUCCESS, State.SKIPPED)
for r in roots):
self.log.info('Marking run %s successful', self)
self.set_state(State.SUCCESS)
dag.handle_callback(self, success=True, reason='success', session=session)
# if *all tasks* are deadlocked, the run failed
elif (unfinished_tasks and none_depends_on_past and
none_task_concurrency and no_dependencies_met):
self.log.info('Deadlock; marking run %s failed', self)
self.set_state(State.FAILED)
dag.handle_callback(self, success=False, reason='all_tasks_deadlocked',
session=session)
# finally, if the roots aren't done, the dag is still running
else:
self.set_state(State.RUNNING)
self._emit_duration_stats_for_finished_state()
        # todo: determine whether we want to use with_for_update to make sure to lock the run
session.merge(self)
session.commit()
return self.state | def update_state(self, session=None):
dag = self.get_dag()
tis = self.get_task_instances(session=session)
self.log.debug("Updating state for %s considering %s task(s)", self, len(tis))
for ti in list(tis):
# skip in db?
if ti.state == State.REMOVED:
tis.remove(ti)
else:
ti.task = dag.get_task(ti.task_id)
# pre-calculate
# db is faster
start_dttm = timezone.utcnow()
unfinished_tasks = self.get_task_instances(
state=State.unfinished(),
session=session
)
none_depends_on_past = all(not t.task.depends_on_past for t in unfinished_tasks)
none_task_concurrency = all(t.task.task_concurrency is None
for t in unfinished_tasks)
# small speed up
if unfinished_tasks and none_depends_on_past and none_task_concurrency:
            # todo: this can actually get pretty slow: one task costs between 0.01-0.15s
no_dependencies_met = True
for ut in unfinished_tasks:
# We need to flag upstream and check for changes because upstream
# failures/re-schedules can result in deadlock false positives
old_state = ut.state
deps_met = ut.are_dependencies_met(
dep_context=DepContext(
flag_upstream_failed=True,
ignore_in_retry_period=True,
ignore_in_reschedule_period=True),
session=session)
if deps_met or old_state != ut.current_state(session=session):
no_dependencies_met = False
break
duration = (timezone.utcnow() - start_dttm).total_seconds() * 1000
Stats.timing("dagrun.dependency-check.{}".format(self.dag_id), duration)
root_ids = [t.task_id for t in dag.roots]
roots = [t for t in tis if t.task_id in root_ids]
# if all roots finished and at least one failed, the run failed
if (not unfinished_tasks and
any(r.state in (State.FAILED, State.UPSTREAM_FAILED) for r in roots)):
self.log.info('Marking run %s failed', self)
self.set_state(State.FAILED)
dag.handle_callback(self, success=False, reason='task_failure',
session=session)
# if all roots succeeded and no unfinished tasks, the run succeeded
elif not unfinished_tasks and all(r.state in (State.SUCCESS, State.SKIPPED)
for r in roots):
self.log.info('Marking run %s successful', self)
self.set_state(State.SUCCESS)
dag.handle_callback(self, success=True, reason='success', session=session)
# if *all tasks* are deadlocked, the run failed
elif (unfinished_tasks and none_depends_on_past and
none_task_concurrency and no_dependencies_met):
self.log.info('Deadlock; marking run %s failed', self)
self.set_state(State.FAILED)
dag.handle_callback(self, success=False, reason='all_tasks_deadlocked',
session=session)
# finally, if the roots aren't done, the dag is still running
else:
self.set_state(State.RUNNING)
self._emit_duration_stats_for_finished_state()
        # todo: determine whether we want to use with_for_update to make sure to lock the run
session.merge(self)
session.commit()
return self.state | Determines the overall state of the DagRun based on the state
of its TaskInstances.
:return: State |
def verify_integrity(self, session=None):
"""
Verifies the DagRun by checking for removed tasks or tasks that are not in the
database yet. It will set state to removed or add the task if required.
"""
from airflow.models.taskinstance import TaskInstance # Avoid circular import
dag = self.get_dag()
tis = self.get_task_instances(session=session)
# check for removed or restored tasks
task_ids = []
for ti in tis:
task_ids.append(ti.task_id)
task = None
try:
task = dag.get_task(ti.task_id)
except AirflowException:
if ti.state == State.REMOVED:
pass # ti has already been removed, just ignore it
elif self.state is not State.RUNNING and not dag.partial:
self.log.warning("Failed to get task '{}' for dag '{}'. "
"Marking it as removed.".format(ti, dag))
Stats.incr(
"task_removed_from_dag.{}".format(dag.dag_id), 1, 1)
ti.state = State.REMOVED
is_task_in_dag = task is not None
should_restore_task = is_task_in_dag and ti.state == State.REMOVED
if should_restore_task:
self.log.info("Restoring task '{}' which was previously "
"removed from DAG '{}'".format(ti, dag))
Stats.incr("task_restored_to_dag.{}".format(dag.dag_id), 1, 1)
ti.state = State.NONE
# check for missing tasks
for task in six.itervalues(dag.task_dict):
if task.start_date > self.execution_date and not self.is_backfill:
continue
if task.task_id not in task_ids:
Stats.incr(
"task_instance_created-{}".format(task.__class__.__name__),
1, 1)
ti = TaskInstance(task, self.execution_date)
session.add(ti)
session.commit() | def verify_integrity(self, session=None):
from airflow.models.taskinstance import TaskInstance # Avoid circular import
dag = self.get_dag()
tis = self.get_task_instances(session=session)
# check for removed or restored tasks
task_ids = []
for ti in tis:
task_ids.append(ti.task_id)
task = None
try:
task = dag.get_task(ti.task_id)
except AirflowException:
if ti.state == State.REMOVED:
pass # ti has already been removed, just ignore it
elif self.state is not State.RUNNING and not dag.partial:
self.log.warning("Failed to get task '{}' for dag '{}'. "
"Marking it as removed.".format(ti, dag))
Stats.incr(
"task_removed_from_dag.{}".format(dag.dag_id), 1, 1)
ti.state = State.REMOVED
is_task_in_dag = task is not None
should_restore_task = is_task_in_dag and ti.state == State.REMOVED
if should_restore_task:
self.log.info("Restoring task '{}' which was previously "
"removed from DAG '{}'".format(ti, dag))
Stats.incr("task_restored_to_dag.{}".format(dag.dag_id), 1, 1)
ti.state = State.NONE
# check for missing tasks
for task in six.itervalues(dag.task_dict):
if task.start_date > self.execution_date and not self.is_backfill:
continue
if task.task_id not in task_ids:
Stats.incr(
"task_instance_created-{}".format(task.__class__.__name__),
1, 1)
ti = TaskInstance(task, self.execution_date)
session.add(ti)
session.commit() | Verifies the DagRun by checking for removed tasks or tasks that are not in the
database yet. It will set state to removed or add the task if required. |
def jenkins_request_with_headers(jenkins_server, req):
"""
    We need to get the headers in addition to the response body
    in order to get the location from them.
    This function uses the jenkins_request method from the python-jenkins library
    with just the return call changed.
:param jenkins_server: The server to query
:param req: The request to execute
:return: Dict containing the response body (key body)
and the headers coming along (headers)
"""
try:
response = jenkins_server.jenkins_request(req)
response_body = response.content
response_headers = response.headers
if response_body is None:
raise jenkins.EmptyResponseException(
"Error communicating with server[%s]: "
"empty response" % jenkins_server.server)
return {'body': response_body.decode('utf-8'), 'headers': response_headers}
except HTTPError as e:
        # Jenkins's funky authentication means it's nigh impossible to
# distinguish errors.
if e.code in [401, 403, 500]:
# six.moves.urllib.error.HTTPError provides a 'reason'
            # attribute for all python versions except for ver 2.6
# Falling back to HTTPError.msg since it contains the
# same info as reason
raise JenkinsException(
'Error in request. ' +
'Possibly authentication failed [%s]: %s' % (
e.code, e.msg)
)
elif e.code == 404:
raise jenkins.NotFoundException('Requested item could not be found')
else:
raise
except socket.timeout as e:
raise jenkins.TimeoutException('Error in request: %s' % e)
except URLError as e:
# python 2.6 compatibility to ensure same exception raised
# since URLError wraps a socket timeout on python 2.6.
if str(e.reason) == "timed out":
raise jenkins.TimeoutException('Error in request: %s' % e.reason)
raise JenkinsException('Error in request: %s' % e.reason) | def jenkins_request_with_headers(jenkins_server, req):
try:
response = jenkins_server.jenkins_request(req)
response_body = response.content
response_headers = response.headers
if response_body is None:
raise jenkins.EmptyResponseException(
"Error communicating with server[%s]: "
"empty response" % jenkins_server.server)
return {'body': response_body.decode('utf-8'), 'headers': response_headers}
except HTTPError as e:
        # Jenkins's funky authentication means it's nigh impossible to
# distinguish errors.
if e.code in [401, 403, 500]:
# six.moves.urllib.error.HTTPError provides a 'reason'
            # attribute for all python versions except for ver 2.6
# Falling back to HTTPError.msg since it contains the
# same info as reason
raise JenkinsException(
'Error in request. ' +
'Possibly authentication failed [%s]: %s' % (
e.code, e.msg)
)
elif e.code == 404:
raise jenkins.NotFoundException('Requested item could not be found')
else:
raise
except socket.timeout as e:
raise jenkins.TimeoutException('Error in request: %s' % e)
except URLError as e:
# python 2.6 compatibility to ensure same exception raised
# since URLError wraps a socket timeout on python 2.6.
if str(e.reason) == "timed out":
raise jenkins.TimeoutException('Error in request: %s' % e.reason)
        raise JenkinsException('Error in request: %s' % e.reason) | We need to get the headers in addition to the response body
in order to get the location from them.
This function uses the jenkins_request method from the python-jenkins library
with just the return call changed.
:param jenkins_server: The server to query
:param req: The request to execute
:return: Dict containing the response body (key body)
and the headers coming along (headers) |
def context_to_airflow_vars(context, in_env_var_format=False):
"""
Given a context, this function provides a dictionary of values that can be used to
externally reconstruct relations between dags, dag_runs, tasks and task_instances.
    Defaults to abc.def.ghi format and can be made to ABC_DEF_GHI format if
in_env_var_format is set to True.
:param context: The context for the task_instance of interest.
:type context: dict
:param in_env_var_format: If returned vars should be in ABC_DEF_GHI format.
:type in_env_var_format: bool
:return: task_instance context as dict.
"""
params = dict()
if in_env_var_format:
name_format = 'env_var_format'
else:
name_format = 'default'
task_instance = context.get('task_instance')
if task_instance and task_instance.dag_id:
params[AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_DAG_ID'][
name_format]] = task_instance.dag_id
if task_instance and task_instance.task_id:
params[AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_TASK_ID'][
name_format]] = task_instance.task_id
if task_instance and task_instance.execution_date:
params[
AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_EXECUTION_DATE'][
name_format]] = task_instance.execution_date.isoformat()
dag_run = context.get('dag_run')
if dag_run and dag_run.run_id:
params[AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_DAG_RUN_ID'][
name_format]] = dag_run.run_id
return params | def context_to_airflow_vars(context, in_env_var_format=False):
params = dict()
if in_env_var_format:
name_format = 'env_var_format'
else:
name_format = 'default'
task_instance = context.get('task_instance')
if task_instance and task_instance.dag_id:
params[AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_DAG_ID'][
name_format]] = task_instance.dag_id
if task_instance and task_instance.task_id:
params[AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_TASK_ID'][
name_format]] = task_instance.task_id
if task_instance and task_instance.execution_date:
params[
AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_EXECUTION_DATE'][
name_format]] = task_instance.execution_date.isoformat()
dag_run = context.get('dag_run')
if dag_run and dag_run.run_id:
params[AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_DAG_RUN_ID'][
name_format]] = dag_run.run_id
return params | Given a context, this function provides a dictionary of values that can be used to
externally reconstruct relations between dags, dag_runs, tasks and task_instances.
Defaults to abc.def.ghi format and can be made to ABC_DEF_GHI format if
in_env_var_format is set to True.
:param context: The context for the task_instance of interest.
:type context: dict
:param in_env_var_format: If returned vars should be in ABC_DEF_GHI format.
:type in_env_var_format: bool
:return: task_instance context as dict. |
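
A hedged sketch of calling this helper with a minimal fake context; the import path and the exact output key names (derived from AIRFLOW_VAR_NAME_FORMAT_MAPPING) are stated from memory and should be treated as assumptions.

from collections import namedtuple
from datetime import datetime

# Import path assumed (airflow.utils.operator_helpers in 1.10.x).
from airflow.utils.operator_helpers import context_to_airflow_vars

TI = namedtuple('TI', 'dag_id task_id execution_date')
DR = namedtuple('DR', 'run_id')
context = {
    'task_instance': TI('example_dag', 'example_task', datetime(2019, 1, 1)),
    'dag_run': DR('manual__2019-01-01'),
}
print(context_to_airflow_vars(context))
# expected keys (assumed): airflow.ctx.dag_id, airflow.ctx.task_id, ...
print(context_to_airflow_vars(context, in_env_var_format=True))
# expected keys (assumed): AIRFLOW_CTX_DAG_ID, AIRFLOW_CTX_TASK_ID, ...
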
def conditionally_trigger(context, dag_run_obj):
"""This function decides whether or not to Trigger the remote DAG"""
c_p = context['params']['condition_param']
print("Controller DAG : conditionally_trigger = {}".format(c_p))
if context['params']['condition_param']:
dag_run_obj.payload = {'message': context['params']['message']}
pp.pprint(dag_run_obj.payload)
return dag_run_obj | def conditionally_trigger(context, dag_run_obj):
c_p = context['params']['condition_param']
print("Controller DAG : conditionally_trigger = {}".format(c_p))
if context['params']['condition_param']:
dag_run_obj.payload = {'message': context['params']['message']}
pp.pprint(dag_run_obj.payload)
        return dag_run_obj | This function decides whether or not to trigger the remote DAG |
def send_metric(self, metric_name, datapoint, tags=None, type_=None, interval=None):
"""
Sends a single datapoint metric to DataDog
:param metric_name: The name of the metric
:type metric_name: str
:param datapoint: A single integer or float related to the metric
:type datapoint: int or float
:param tags: A list of tags associated with the metric
:type tags: list
:param type_: Type of your metric: gauge, rate, or count
:type type_: str
:param interval: If the type of the metric is rate or count, define the corresponding interval
:type interval: int
"""
response = api.Metric.send(
metric=metric_name,
points=datapoint,
host=self.host,
tags=tags,
type=type_,
interval=interval)
self.validate_response(response)
return response | def send_metric(self, metric_name, datapoint, tags=None, type_=None, interval=None):
response = api.Metric.send(
metric=metric_name,
points=datapoint,
host=self.host,
tags=tags,
type=type_,
interval=interval)
self.validate_response(response)
return response | Sends a single datapoint metric to DataDog
:param metric_name: The name of the metric
:type metric_name: str
:param datapoint: A single integer or float related to the metric
:type datapoint: int or float
:param tags: A list of tags associated with the metric
:type tags: list
:param type_: Type of your metric: gauge, rate, or count
:type type_: str
:param interval: If the type of the metric is rate or count, define the corresponding interval
:type interval: int |
def query_metric(self,
query,
from_seconds_ago,
to_seconds_ago):
"""
Queries datadog for a specific metric, potentially with some
function applied to it and returns the results.
:param query: The datadog query to execute (see datadog docs)
:type query: str
:param from_seconds_ago: How many seconds ago to start querying for.
:type from_seconds_ago: int
:param to_seconds_ago: Up to how many seconds ago to query for.
:type to_seconds_ago: int
"""
now = int(time.time())
response = api.Metric.query(
start=now - from_seconds_ago,
end=now - to_seconds_ago,
query=query)
self.validate_response(response)
return response | def query_metric(self,
query,
from_seconds_ago,
to_seconds_ago):
now = int(time.time())
response = api.Metric.query(
start=now - from_seconds_ago,
end=now - to_seconds_ago,
query=query)
self.validate_response(response)
return response | Queries datadog for a specific metric, potentially with some
function applied to it and returns the results.
:param query: The datadog query to execute (see datadog docs)
:type query: str
:param from_seconds_ago: How many seconds ago to start querying for.
:type from_seconds_ago: int
:param to_seconds_ago: Up to how many seconds ago to query for.
:type to_seconds_ago: int |
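
A hedged example of the underlying datadog library calls that send_metric and query_metric wrap; the API/app keys, host, tags and query string are all placeholders.

import time
from datadog import initialize, api

initialize(api_key='<API_KEY>', app_key='<APP_KEY>')  # placeholder credentials

# Roughly what send_metric('example.metric', 42, ...) issues:
api.Metric.send(metric='example.metric', points=42, host='my-host',
                tags=['env:dev'], type='gauge')

# Roughly what query_metric('avg:system.cpu.idle{*}', 600, 0) issues:
now = int(time.time())
resp = api.Metric.query(start=now - 600, end=now,
                        query='avg:system.cpu.idle{*}')
print(resp.get('status'))
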
def get_dag(self, dag_id):
"""
Gets the DAG out of the dictionary, and refreshes it if expired
"""
from airflow.models.dag import DagModel # Avoid circular import
# If asking for a known subdag, we want to refresh the parent
root_dag_id = dag_id
if dag_id in self.dags:
dag = self.dags[dag_id]
if dag.is_subdag:
root_dag_id = dag.parent_dag.dag_id
# If the dag corresponding to root_dag_id is absent or expired
orm_dag = DagModel.get_current(root_dag_id)
if orm_dag and (
root_dag_id not in self.dags or
(
orm_dag.last_expired and
dag.last_loaded < orm_dag.last_expired
)
):
# Reprocess source file
found_dags = self.process_file(
filepath=orm_dag.fileloc, only_if_updated=False)
# If the source file no longer exports `dag_id`, delete it from self.dags
if found_dags and dag_id in [found_dag.dag_id for found_dag in found_dags]:
return self.dags[dag_id]
elif dag_id in self.dags:
del self.dags[dag_id]
return self.dags.get(dag_id) | def get_dag(self, dag_id):
from airflow.models.dag import DagModel # Avoid circular import
# If asking for a known subdag, we want to refresh the parent
root_dag_id = dag_id
if dag_id in self.dags:
dag = self.dags[dag_id]
if dag.is_subdag:
root_dag_id = dag.parent_dag.dag_id
# If the dag corresponding to root_dag_id is absent or expired
orm_dag = DagModel.get_current(root_dag_id)
if orm_dag and (
root_dag_id not in self.dags or
(
orm_dag.last_expired and
dag.last_loaded < orm_dag.last_expired
)
):
# Reprocess source file
found_dags = self.process_file(
filepath=orm_dag.fileloc, only_if_updated=False)
# If the source file no longer exports `dag_id`, delete it from self.dags
if found_dags and dag_id in [found_dag.dag_id for found_dag in found_dags]:
return self.dags[dag_id]
elif dag_id in self.dags:
del self.dags[dag_id]
return self.dags.get(dag_id) | Gets the DAG out of the dictionary, and refreshes it if expired |
def kill_zombies(self, zombies, session=None):
"""
Fail given zombie tasks, which are tasks that haven't
had a heartbeat for too long, in the current DagBag.
:param zombies: zombie task instances to kill.
:type zombies: airflow.utils.dag_processing.SimpleTaskInstance
:param session: DB session.
:type session: sqlalchemy.orm.session.Session
"""
from airflow.models.taskinstance import TaskInstance # Avoid circular import
for zombie in zombies:
if zombie.dag_id in self.dags:
dag = self.dags[zombie.dag_id]
if zombie.task_id in dag.task_ids:
task = dag.get_task(zombie.task_id)
ti = TaskInstance(task, zombie.execution_date)
# Get properties needed for failure handling from SimpleTaskInstance.
ti.start_date = zombie.start_date
ti.end_date = zombie.end_date
ti.try_number = zombie.try_number
ti.state = zombie.state
ti.test_mode = configuration.getboolean('core', 'unit_test_mode')
ti.handle_failure("{} detected as zombie".format(ti),
ti.test_mode, ti.get_template_context())
self.log.info(
'Marked zombie job %s as %s', ti, ti.state)
Stats.incr('zombies_killed')
session.commit() | def kill_zombies(self, zombies, session=None):
from airflow.models.taskinstance import TaskInstance # Avoid circular import
for zombie in zombies:
if zombie.dag_id in self.dags:
dag = self.dags[zombie.dag_id]
if zombie.task_id in dag.task_ids:
task = dag.get_task(zombie.task_id)
ti = TaskInstance(task, zombie.execution_date)
# Get properties needed for failure handling from SimpleTaskInstance.
ti.start_date = zombie.start_date
ti.end_date = zombie.end_date
ti.try_number = zombie.try_number
ti.state = zombie.state
ti.test_mode = configuration.getboolean('core', 'unit_test_mode')
ti.handle_failure("{} detected as zombie".format(ti),
ti.test_mode, ti.get_template_context())
self.log.info(
'Marked zombie job %s as %s', ti, ti.state)
Stats.incr('zombies_killed')
session.commit() | Fail given zombie tasks, which are tasks that haven't
had a heartbeat for too long, in the current DagBag.
:param zombies: zombie task instances to kill.
:type zombies: airflow.utils.dag_processing.SimpleTaskInstance
:param session: DB session.
:type session: sqlalchemy.orm.session.Session |
def bag_dag(self, dag, parent_dag, root_dag):
"""
Adds the DAG into the bag, recurses into sub dags.
Throws AirflowDagCycleException if a cycle is detected in this dag or its subdags
"""
dag.test_cycle() # throws if a task cycle is found
dag.resolve_template_files()
dag.last_loaded = timezone.utcnow()
for task in dag.tasks:
settings.policy(task)
subdags = dag.subdags
try:
for subdag in subdags:
subdag.full_filepath = dag.full_filepath
subdag.parent_dag = dag
subdag.is_subdag = True
self.bag_dag(subdag, parent_dag=dag, root_dag=root_dag)
self.dags[dag.dag_id] = dag
self.log.debug('Loaded DAG %s', dag)
except AirflowDagCycleException as cycle_exception:
# There was an error in bagging the dag. Remove it from the list of dags
self.log.exception('Exception bagging dag: %s', dag.dag_id)
# Only necessary at the root level since DAG.subdags automatically
# performs DFS to search through all subdags
if dag == root_dag:
for subdag in subdags:
if subdag.dag_id in self.dags:
del self.dags[subdag.dag_id]
raise cycle_exception | def bag_dag(self, dag, parent_dag, root_dag):
dag.test_cycle() # throws if a task cycle is found
dag.resolve_template_files()
dag.last_loaded = timezone.utcnow()
for task in dag.tasks:
settings.policy(task)
subdags = dag.subdags
try:
for subdag in subdags:
subdag.full_filepath = dag.full_filepath
subdag.parent_dag = dag
subdag.is_subdag = True
self.bag_dag(subdag, parent_dag=dag, root_dag=root_dag)
self.dags[dag.dag_id] = dag
self.log.debug('Loaded DAG %s', dag)
except AirflowDagCycleException as cycle_exception:
# There was an error in bagging the dag. Remove it from the list of dags
self.log.exception('Exception bagging dag: %s', dag.dag_id)
# Only necessary at the root level since DAG.subdags automatically
# performs DFS to search through all subdags
if dag == root_dag:
for subdag in subdags:
if subdag.dag_id in self.dags:
del self.dags[subdag.dag_id]
raise cycle_exception | Adds the DAG into the bag, recurses into sub dags.
Throws AirflowDagCycleException if a cycle is detected in this dag or its subdags |
def collect_dags(
self,
dag_folder=None,
only_if_updated=True,
include_examples=configuration.conf.getboolean('core', 'LOAD_EXAMPLES'),
safe_mode=configuration.conf.getboolean('core', 'DAG_DISCOVERY_SAFE_MODE')):
"""
Given a file path or a folder, this method looks for python modules,
imports them and adds them to the dagbag collection.
Note that if a ``.airflowignore`` file is found while processing
the directory, it will behave much like a ``.gitignore``,
ignoring files that match any of the regex patterns specified
in the file.
**Note**: The patterns in .airflowignore are treated as
un-anchored regexes, not shell-like glob patterns.
"""
start_dttm = timezone.utcnow()
dag_folder = dag_folder or self.dag_folder
# Used to store stats around DagBag processing
stats = []
FileLoadStat = namedtuple(
'FileLoadStat', "file duration dag_num task_num dags")
dag_folder = correct_maybe_zipped(dag_folder)
for filepath in list_py_file_paths(dag_folder, safe_mode=safe_mode,
include_examples=include_examples):
try:
ts = timezone.utcnow()
found_dags = self.process_file(
filepath, only_if_updated=only_if_updated,
safe_mode=safe_mode)
td = timezone.utcnow() - ts
td = td.total_seconds() + (
float(td.microseconds) / 1000000)
stats.append(FileLoadStat(
filepath.replace(dag_folder, ''),
td,
len(found_dags),
sum([len(dag.tasks) for dag in found_dags]),
str([dag.dag_id for dag in found_dags]),
))
except Exception as e:
self.log.exception(e)
Stats.gauge(
'collect_dags', (timezone.utcnow() - start_dttm).total_seconds(), 1)
Stats.gauge(
'dagbag_size', len(self.dags), 1)
Stats.gauge(
'dagbag_import_errors', len(self.import_errors), 1)
self.dagbag_stats = sorted(
stats, key=lambda x: x.duration, reverse=True) | def collect_dags(
self,
dag_folder=None,
only_if_updated=True,
include_examples=configuration.conf.getboolean('core', 'LOAD_EXAMPLES'),
safe_mode=configuration.conf.getboolean('core', 'DAG_DISCOVERY_SAFE_MODE')):
start_dttm = timezone.utcnow()
dag_folder = dag_folder or self.dag_folder
# Used to store stats around DagBag processing
stats = []
FileLoadStat = namedtuple(
'FileLoadStat', "file duration dag_num task_num dags")
dag_folder = correct_maybe_zipped(dag_folder)
for filepath in list_py_file_paths(dag_folder, safe_mode=safe_mode,
include_examples=include_examples):
try:
ts = timezone.utcnow()
found_dags = self.process_file(
filepath, only_if_updated=only_if_updated,
safe_mode=safe_mode)
            # timedelta.total_seconds() already accounts for the microseconds
            td = (timezone.utcnow() - ts).total_seconds()
stats.append(FileLoadStat(
filepath.replace(dag_folder, ''),
td,
len(found_dags),
sum([len(dag.tasks) for dag in found_dags]),
str([dag.dag_id for dag in found_dags]),
))
except Exception as e:
self.log.exception(e)
Stats.gauge(
'collect_dags', (timezone.utcnow() - start_dttm).total_seconds(), 1)
Stats.gauge(
'dagbag_size', len(self.dags), 1)
Stats.gauge(
'dagbag_import_errors', len(self.import_errors), 1)
self.dagbag_stats = sorted(
stats, key=lambda x: x.duration, reverse=True) | Given a file path or a folder, this method looks for python modules,
imports them and adds them to the dagbag collection.
Note that if a ``.airflowignore`` file is found while processing
the directory, it will behave much like a ``.gitignore``,
ignoring files that match any of the regex patterns specified
in the file.
**Note**: The patterns in .airflowignore are treated as
un-anchored regexes, not shell-like glob patterns. |
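A brief, hedged usage sketch for the row above: constructing a DagBag drives collect_dags and fills dagbag_stats. The folder path is a placeholder.

from airflow.models import DagBag

# Placeholder folder; any directory containing DAG files would do.
dagbag = DagBag(dag_folder='/path/to/dags', include_examples=False)
print('dags loaded:', len(dagbag.dags))
print('import errors:', len(dagbag.import_errors))
for stat in dagbag.dagbag_stats:
    # FileLoadStat(file, duration, dag_num, task_num, dags), as built above
    print(stat.file, stat.duration, stat.dag_num)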
def ds_add(ds, days):
"""
    Add or subtract days from a YYYY-MM-DD date string.
:param ds: anchor date in ``YYYY-MM-DD`` format to add to
:type ds: str
:param days: number of days to add to the ds, you can use negative values
:type days: int
>>> ds_add('2015-01-01', 5)
'2015-01-06'
>>> ds_add('2015-01-06', -5)
'2015-01-01'
"""
ds = datetime.strptime(ds, '%Y-%m-%d')
if days:
ds = ds + timedelta(days)
return ds.isoformat()[:10] | def ds_add(ds, days):
ds = datetime.strptime(ds, '%Y-%m-%d')
if days:
ds = ds + timedelta(days)
    return ds.isoformat()[:10] | Add or subtract days from a YYYY-MM-DD date string.
:param ds: anchor date in ``YYYY-MM-DD`` format to add to
:type ds: str
:param days: number of days to add to the ds, you can use negative values
:type days: int
>>> ds_add('2015-01-01', 5)
'2015-01-06'
>>> ds_add('2015-01-06', -5)
'2015-01-01' |
def ds_format(ds, input_format, output_format):
"""
Takes an input string and outputs another string
as specified in the output format
:param ds: input string which contains a date
:type ds: str
:param input_format: input string format. E.g. %Y-%m-%d
:type input_format: str
:param output_format: output string format E.g. %Y-%m-%d
:type output_format: str
>>> ds_format('2015-01-01', "%Y-%m-%d", "%m-%d-%y")
'01-01-15'
>>> ds_format('1/5/2015', "%m/%d/%Y", "%Y-%m-%d")
'2015-01-05'
"""
return datetime.strptime(ds, input_format).strftime(output_format) | def ds_format(ds, input_format, output_format):
return datetime.strptime(ds, input_format).strftime(output_format) | Takes an input string and outputs another string
as specified in the output format
:param ds: input string which contains a date
:type ds: str
:param input_format: input string format. E.g. %Y-%m-%d
:type input_format: str
:param output_format: output string format E.g. %Y-%m-%d
:type output_format: str
>>> ds_format('2015-01-01', "%Y-%m-%d", "%m-%d-%y")
'01-01-15'
>>> ds_format('1/5/2015', "%m/%d/%Y", "%Y-%m-%d")
'2015-01-05' |
def poke(self, context):
"""
    Poke for files matching self.regex in a directory.
:return: Bool depending on the search criteria
"""
sb = self.hook(self.hdfs_conn_id).get_conn()
self.log.info(
'Poking for %s to be a directory with files matching %s', self.filepath, self.regex.pattern
)
result = [f for f in sb.ls([self.filepath], include_toplevel=False) if
f['file_type'] == 'f' and
self.regex.match(f['path'].replace('%s/' % self.filepath, ''))]
result = self.filter_for_ignored_ext(result, self.ignored_ext,
self.ignore_copying)
result = self.filter_for_filesize(result, self.file_size)
return bool(result) | def poke(self, context):
sb = self.hook(self.hdfs_conn_id).get_conn()
self.log.info(
'Poking for %s to be a directory with files matching %s', self.filepath, self.regex.pattern
)
result = [f for f in sb.ls([self.filepath], include_toplevel=False) if
f['file_type'] == 'f' and
self.regex.match(f['path'].replace('%s/' % self.filepath, ''))]
result = self.filter_for_ignored_ext(result, self.ignored_ext,
self.ignore_copying)
result = self.filter_for_filesize(result, self.file_size)
    return bool(result) | Poke for files matching self.regex in a directory.
:return: Bool depending on the search criteria |
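A hedged sketch of wiring this poke into a DAG via the contrib regex HDFS sensor; the import path, directory, and pattern below are assumptions for illustration.

import re

from airflow import DAG
from airflow.contrib.sensors.hdfs_sensor import HdfsSensorRegex  # contrib import path assumed
from airflow.utils.dates import days_ago

dag = DAG('hdfs_regex_example', start_date=days_ago(1), schedule_interval=None)

wait_for_parts = HdfsSensorRegex(
    task_id='wait_for_part_files',
    filepath='/data/staging/2019-01-01',     # placeholder HDFS directory
    regex=re.compile(r'part-\d+\.parquet'),  # placeholder file pattern
    hdfs_conn_id='hdfs_default',
    poke_interval=60,
    dag=dag,
)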
def poke(self, context):
"""
    Poke for a non-empty directory (or an empty one, if self.be_empty is set).
:return: Bool depending on the search criteria
"""
sb = self.hook(self.hdfs_conn_id).get_conn()
result = [f for f in sb.ls([self.filepath], include_toplevel=True)]
result = self.filter_for_ignored_ext(result, self.ignored_ext,
self.ignore_copying)
result = self.filter_for_filesize(result, self.file_size)
if self.be_empty:
        self.log.info('Poking for filepath %s to be an empty directory', self.filepath)
return len(result) == 1 and result[0]['path'] == self.filepath
else:
        self.log.info('Poking for filepath %s to be a non-empty directory', self.filepath)
result.pop(0)
return bool(result) and result[0]['file_type'] == 'f' | def poke(self, context):
sb = self.hook(self.hdfs_conn_id).get_conn()
result = [f for f in sb.ls([self.filepath], include_toplevel=True)]
result = self.filter_for_ignored_ext(result, self.ignored_ext,
self.ignore_copying)
result = self.filter_for_filesize(result, self.file_size)
if self.be_empty:
        self.log.info('Poking for filepath %s to be an empty directory', self.filepath)
return len(result) == 1 and result[0]['path'] == self.filepath
else:
        self.log.info('Poking for filepath %s to be a non-empty directory', self.filepath)
        result.pop(0)
        return bool(result) and result[0]['file_type'] == 'f' | Poke for a non-empty directory (or an empty one, if self.be_empty is set).
:return: Bool depending on the search criteria |
def clear_task_instances(tis,
session,
activate_dag_runs=True,
dag=None,
):
"""
Clears a set of task instances, but makes sure the running ones
get killed.
:param tis: a list of task instances
:param session: current session
:param activate_dag_runs: flag to check for active dag run
:param dag: DAG object
"""
job_ids = []
for ti in tis:
if ti.state == State.RUNNING:
if ti.job_id:
ti.state = State.SHUTDOWN
job_ids.append(ti.job_id)
else:
task_id = ti.task_id
if dag and dag.has_task(task_id):
task = dag.get_task(task_id)
task_retries = task.retries
ti.max_tries = ti.try_number + task_retries - 1
else:
# Ignore errors when updating max_tries if dag is None or
# task not found in dag since database records could be
# outdated. We make max_tries the maximum value of its
# original max_tries or the current task try number.
ti.max_tries = max(ti.max_tries, ti.try_number - 1)
ti.state = State.NONE
session.merge(ti)
if job_ids:
from airflow.jobs import BaseJob as BJ
for job in session.query(BJ).filter(BJ.id.in_(job_ids)).all():
job.state = State.SHUTDOWN
if activate_dag_runs and tis:
from airflow.models.dagrun import DagRun # Avoid circular import
drs = session.query(DagRun).filter(
DagRun.dag_id.in_({ti.dag_id for ti in tis}),
DagRun.execution_date.in_({ti.execution_date for ti in tis}),
).all()
for dr in drs:
dr.state = State.RUNNING
dr.start_date = timezone.utcnow() | def clear_task_instances(tis,
session,
activate_dag_runs=True,
dag=None,
):
job_ids = []
for ti in tis:
if ti.state == State.RUNNING:
if ti.job_id:
ti.state = State.SHUTDOWN
job_ids.append(ti.job_id)
else:
task_id = ti.task_id
if dag and dag.has_task(task_id):
task = dag.get_task(task_id)
task_retries = task.retries
ti.max_tries = ti.try_number + task_retries - 1
else:
# Ignore errors when updating max_tries if dag is None or
# task not found in dag since database records could be
# outdated. We make max_tries the maximum value of its
# original max_tries or the current task try number.
ti.max_tries = max(ti.max_tries, ti.try_number - 1)
ti.state = State.NONE
session.merge(ti)
if job_ids:
from airflow.jobs import BaseJob as BJ
for job in session.query(BJ).filter(BJ.id.in_(job_ids)).all():
job.state = State.SHUTDOWN
if activate_dag_runs and tis:
from airflow.models.dagrun import DagRun # Avoid circular import
drs = session.query(DagRun).filter(
DagRun.dag_id.in_({ti.dag_id for ti in tis}),
DagRun.execution_date.in_({ti.execution_date for ti in tis}),
).all()
for dr in drs:
dr.state = State.RUNNING
dr.start_date = timezone.utcnow() | Clears a set of task instances, but makes sure the running ones
get killed.
:param tis: a list of task instances
:param session: current session
:param activate_dag_runs: flag to check for active dag run
:param dag: DAG object |
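A hedged sketch of calling this helper directly; the import path, dag id, and date are assumptions, and the session comes from airflow.settings.

from datetime import datetime

from airflow import settings
from airflow.models import DagBag, TaskInstance
from airflow.models.taskinstance import clear_task_instances  # import path assumed

dag = DagBag().get_dag('example_dag')  # placeholder dag id
execution_date = datetime(2019, 1, 1)  # placeholder run date
session = settings.Session()
tis = session.query(TaskInstance).filter(
    TaskInstance.dag_id == dag.dag_id,
    TaskInstance.execution_date == execution_date,
).all()
clear_task_instances(tis, session, activate_dag_runs=True, dag=dag)
session.commit()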
def try_number(self):
"""
    Return the try number that this task instance will have when it is actually
    run.
    If the TI is currently running, this will match the column in the
    database; in all other cases this will be incremented by one.
"""
# This is designed so that task logs end up in the right file.
if self.state == State.RUNNING:
return self._try_number
return self._try_number + 1 | def try_number(self):
# This is designed so that task logs end up in the right file.
if self.state == State.RUNNING:
return self._try_number
    return self._try_number + 1 | Return the try number that this task instance will have when it is actually
    run.
    If the TI is currently running, this will match the column in the
    database; in all other cases this will be incremented by one. | def generate_command(dag_id,
def generate_command(dag_id,
task_id,
execution_date,
mark_success=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
local=False,
pickle_id=None,
file_path=None,
raw=False,
job_id=None,
pool=None,
cfg_path=None
):
"""
Generates the shell command required to execute this task instance.
:param dag_id: DAG ID
:type dag_id: unicode
:param task_id: Task ID
:type task_id: unicode
:param execution_date: Execution date for the task
:type execution_date: datetime
:param mark_success: Whether to mark the task as successful
:type mark_success: bool
:param ignore_all_deps: Ignore all ignorable dependencies.
Overrides the other ignore_* parameters.
:type ignore_all_deps: bool
:param ignore_depends_on_past: Ignore depends_on_past parameter of DAGs
(e.g. for Backfills)
:type ignore_depends_on_past: bool
:param ignore_task_deps: Ignore task-specific dependencies such as depends_on_past
and trigger rule
:type ignore_task_deps: bool
:param ignore_ti_state: Ignore the task instance's previous failure/success
:type ignore_ti_state: bool
:param local: Whether to run the task locally
:type local: bool
:param pickle_id: If the DAG was serialized to the DB, the ID
associated with the pickled DAG
:type pickle_id: unicode
:param file_path: path to the file containing the DAG definition
:param raw: raw mode (needs more details)
:param job_id: job ID (needs more details)
:param pool: the Airflow pool that the task should run in
:type pool: unicode
:param cfg_path: the Path to the configuration file
:type cfg_path: basestring
:return: shell command that can be used to run the task instance
"""
iso = execution_date.isoformat()
cmd = ["airflow", "run", str(dag_id), str(task_id), str(iso)]
cmd.extend(["--mark_success"]) if mark_success else None
cmd.extend(["--pickle", str(pickle_id)]) if pickle_id else None
cmd.extend(["--job_id", str(job_id)]) if job_id else None
cmd.extend(["-A"]) if ignore_all_deps else None
cmd.extend(["-i"]) if ignore_task_deps else None
cmd.extend(["-I"]) if ignore_depends_on_past else None
cmd.extend(["--force"]) if ignore_ti_state else None
cmd.extend(["--local"]) if local else None
cmd.extend(["--pool", pool]) if pool else None
cmd.extend(["--raw"]) if raw else None
cmd.extend(["-sd", file_path]) if file_path else None
cmd.extend(["--cfg_path", cfg_path]) if cfg_path else None
return cmd | def generate_command(dag_id,
task_id,
execution_date,
mark_success=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
local=False,
pickle_id=None,
file_path=None,
raw=False,
job_id=None,
pool=None,
cfg_path=None
):
iso = execution_date.isoformat()
cmd = ["airflow", "run", str(dag_id), str(task_id), str(iso)]
cmd.extend(["--mark_success"]) if mark_success else None
cmd.extend(["--pickle", str(pickle_id)]) if pickle_id else None
cmd.extend(["--job_id", str(job_id)]) if job_id else None
cmd.extend(["-A"]) if ignore_all_deps else None
cmd.extend(["-i"]) if ignore_task_deps else None
cmd.extend(["-I"]) if ignore_depends_on_past else None
cmd.extend(["--force"]) if ignore_ti_state else None
cmd.extend(["--local"]) if local else None
cmd.extend(["--pool", pool]) if pool else None
cmd.extend(["--raw"]) if raw else None
cmd.extend(["-sd", file_path]) if file_path else None
cmd.extend(["--cfg_path", cfg_path]) if cfg_path else None
return cmd | Generates the shell command required to execute this task instance.
:param dag_id: DAG ID
:type dag_id: unicode
:param task_id: Task ID
:type task_id: unicode
:param execution_date: Execution date for the task
:type execution_date: datetime
:param mark_success: Whether to mark the task as successful
:type mark_success: bool
:param ignore_all_deps: Ignore all ignorable dependencies.
Overrides the other ignore_* parameters.
:type ignore_all_deps: bool
:param ignore_depends_on_past: Ignore depends_on_past parameter of DAGs
(e.g. for Backfills)
:type ignore_depends_on_past: bool
:param ignore_task_deps: Ignore task-specific dependencies such as depends_on_past
and trigger rule
:type ignore_task_deps: bool
:param ignore_ti_state: Ignore the task instance's previous failure/success
:type ignore_ti_state: bool
:param local: Whether to run the task locally
:type local: bool
:param pickle_id: If the DAG was serialized to the DB, the ID
associated with the pickled DAG
:type pickle_id: unicode
:param file_path: path to the file containing the DAG definition
:param raw: raw mode (needs more details)
:param job_id: job ID (needs more details)
:param pool: the Airflow pool that the task should run in
:type pool: unicode
:param cfg_path: the Path to the configuration file
:type cfg_path: basestring
:return: shell command that can be used to run the task instance |
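To make the flag mapping concrete, a small hypothetical call and its expected result (ids and date are placeholders; generate_command is assumed to be exposed as a static method of TaskInstance).

from datetime import datetime

from airflow.models import TaskInstance

cmd = TaskInstance.generate_command(
    dag_id='example_dag',
    task_id='example_task',
    execution_date=datetime(2019, 1, 1),
    local=True,
    pool='default_pool',
)
# cmd == ['airflow', 'run', 'example_dag', 'example_task', '2019-01-01T00:00:00',
#         '--local', '--pool', 'default_pool']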
def current_state(self, session=None):
"""
    Get the very latest state from the database. If a session is passed, we
    use it and looking up the state becomes part of the session; otherwise
    a new session is used.
"""
TI = TaskInstance
ti = session.query(TI).filter(
TI.dag_id == self.dag_id,
TI.task_id == self.task_id,
TI.execution_date == self.execution_date,
).all()
if ti:
state = ti[0].state
else:
state = None
return state | def current_state(self, session=None):
TI = TaskInstance
ti = session.query(TI).filter(
TI.dag_id == self.dag_id,
TI.task_id == self.task_id,
TI.execution_date == self.execution_date,
).all()
if ti:
state = ti[0].state
else:
state = None
    return state | Get the very latest state from the database. If a session is passed, we
    use it and looking up the state becomes part of the session; otherwise
    a new session is used. |
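A hedged usage sketch; since a new session is created when none is passed, the call below omits the session argument. Dag and task ids are placeholders.

from datetime import datetime

from airflow.models import DagBag, TaskInstance

dag = DagBag().get_dag('example_dag')  # placeholder dag id
task = dag.get_task('example_task')    # placeholder task id
ti = TaskInstance(task=task, execution_date=datetime(2019, 1, 1))
print(ti.current_state())  # e.g. 'success', 'failed', or None if no matching row exists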
def error(self, session=None):
"""
Forces the task instance's state to FAILED in the database.
"""
self.log.error("Recording the task instance as FAILED")
self.state = State.FAILED
session.merge(self)
session.commit() | def error(self, session=None):
self.log.error("Recording the task instance as FAILED")
self.state = State.FAILED
session.merge(self)
session.commit() | Forces the task instance's state to FAILED in the database. |
def refresh_from_db(self, session=None, lock_for_update=False):
"""
Refreshes the task instance from the database based on the primary key
:param lock_for_update: if True, indicates that the database should
lock the TaskInstance (issuing a FOR UPDATE clause) until the
session is committed.
"""
TI = TaskInstance
qry = session.query(TI).filter(
TI.dag_id == self.dag_id,
TI.task_id == self.task_id,
TI.execution_date == self.execution_date)
if lock_for_update:
ti = qry.with_for_update().first()
else:
ti = qry.first()
if ti:
self.state = ti.state
self.start_date = ti.start_date
self.end_date = ti.end_date
# Get the raw value of try_number column, don't read through the
        # accessor here, otherwise it will be incremented by one already.
self.try_number = ti._try_number
self.max_tries = ti.max_tries
self.hostname = ti.hostname
self.pid = ti.pid
self.executor_config = ti.executor_config
else:
self.state = None | def refresh_from_db(self, session=None, lock_for_update=False):
TI = TaskInstance
qry = session.query(TI).filter(
TI.dag_id == self.dag_id,
TI.task_id == self.task_id,
TI.execution_date == self.execution_date)
if lock_for_update:
ti = qry.with_for_update().first()
else:
ti = qry.first()
if ti:
self.state = ti.state
self.start_date = ti.start_date
self.end_date = ti.end_date
# Get the raw value of try_number column, don't read through the
        # accessor here, otherwise it will be incremented by one already.
self.try_number = ti._try_number
self.max_tries = ti.max_tries
self.hostname = ti.hostname
self.pid = ti.pid
self.executor_config = ti.executor_config
else:
self.state = None | Refreshes the task instance from the database based on the primary key
:param lock_for_update: if True, indicates that the database should
lock the TaskInstance (issuing a FOR UPDATE clause) until the
session is committed. |
def clear_xcom_data(self, session=None):
"""
Clears all XCom data from the database for the task instance
"""
session.query(XCom).filter(
XCom.dag_id == self.dag_id,
XCom.task_id == self.task_id,
XCom.execution_date == self.execution_date
).delete()
session.commit() | def clear_xcom_data(self, session=None):
session.query(XCom).filter(
XCom.dag_id == self.dag_id,
XCom.task_id == self.task_id,
XCom.execution_date == self.execution_date
).delete()
session.commit() | Clears all XCom data from the database for the task instance |
def key(self):
"""
Returns a tuple that identifies the task instance uniquely
"""
return self.dag_id, self.task_id, self.execution_date, self.try_number | def key(self):
return self.dag_id, self.task_id, self.execution_date, self.try_number | Returns a tuple that identifies the task instance uniquely |
def are_dependents_done(self, session=None):
"""
Checks whether the dependents of this task instance have all succeeded.
This is meant to be used by wait_for_downstream.
This is useful when you do not want to start processing the next
schedule of a task until the dependents are done. For instance,
if the task DROPs and recreates a table.
"""
task = self.task
if not task.downstream_task_ids:
return True
ti = session.query(func.count(TaskInstance.task_id)).filter(
TaskInstance.dag_id == self.dag_id,
TaskInstance.task_id.in_(task.downstream_task_ids),
TaskInstance.execution_date == self.execution_date,
TaskInstance.state == State.SUCCESS,
)
count = ti[0][0]
return count == len(task.downstream_task_ids) | def are_dependents_done(self, session=None):
task = self.task
if not task.downstream_task_ids:
return True
ti = session.query(func.count(TaskInstance.task_id)).filter(
TaskInstance.dag_id == self.dag_id,
TaskInstance.task_id.in_(task.downstream_task_ids),
TaskInstance.execution_date == self.execution_date,
TaskInstance.state == State.SUCCESS,
)
count = ti[0][0]
return count == len(task.downstream_task_ids) | Checks whether the dependents of this task instance have all succeeded.
This is meant to be used by wait_for_downstream.
This is useful when you do not want to start processing the next
schedule of a task until the dependents are done. For instance,
if the task DROPs and recreates a table. |
def next_retry_datetime(self):
"""
Get datetime of the next retry if the task instance fails. For exponential
backoff, retry_delay is used as base and will be converted to seconds.
"""
delay = self.task.retry_delay
if self.task.retry_exponential_backoff:
min_backoff = int(delay.total_seconds() * (2 ** (self.try_number - 2)))
# deterministic per task instance
hash = int(hashlib.sha1("{}#{}#{}#{}".format(self.dag_id,
self.task_id,
self.execution_date,
self.try_number)
.encode('utf-8')).hexdigest(), 16)
# between 0.5 * delay * (2^retry_number) and 1.0 * delay * (2^retry_number)
modded_hash = min_backoff + hash % min_backoff
# timedelta has a maximum representable value. The exponentiation
# here means this value can be exceeded after a certain number
# of tries (around 50 if the initial delay is 1s, even fewer if
# the delay is larger). Cap the value here before creating a
# timedelta object so the operation doesn't fail.
delay_backoff_in_seconds = min(
modded_hash,
timedelta.max.total_seconds() - 1
)
delay = timedelta(seconds=delay_backoff_in_seconds)
if self.task.max_retry_delay:
delay = min(self.task.max_retry_delay, delay)
return self.end_date + delay | def next_retry_datetime(self):
delay = self.task.retry_delay
if self.task.retry_exponential_backoff:
min_backoff = int(delay.total_seconds() * (2 ** (self.try_number - 2)))
# deterministic per task instance
hash = int(hashlib.sha1("{}#{}#{}#{}".format(self.dag_id,
self.task_id,
self.execution_date,
self.try_number)
.encode('utf-8')).hexdigest(), 16)
# between 0.5 * delay * (2^retry_number) and 1.0 * delay * (2^retry_number)
modded_hash = min_backoff + hash % min_backoff
# timedelta has a maximum representable value. The exponentiation
# here means this value can be exceeded after a certain number
# of tries (around 50 if the initial delay is 1s, even fewer if
# the delay is larger). Cap the value here before creating a
# timedelta object so the operation doesn't fail.
delay_backoff_in_seconds = min(
modded_hash,
timedelta.max.total_seconds() - 1
)
delay = timedelta(seconds=delay_backoff_in_seconds)
if self.task.max_retry_delay:
delay = min(self.task.max_retry_delay, delay)
return self.end_date + delay | Get datetime of the next retry if the task instance fails. For exponential
backoff, retry_delay is used as base and will be converted to seconds. |
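A standalone sketch of the backoff arithmetic above, using placeholder identifiers, to show the deterministic [min_backoff, 2 * min_backoff) window the delay falls into.

import hashlib
from datetime import timedelta

retry_delay = timedelta(minutes=5)
try_number = 3  # third attempt

min_backoff = int(retry_delay.total_seconds() * (2 ** (try_number - 2)))  # 600 seconds here
ti_hash = int(hashlib.sha1('example_dag#example_task#2019-01-01#{}'.format(try_number)
                           .encode('utf-8')).hexdigest(), 16)
delay_seconds = min_backoff + ti_hash % min_backoff  # deterministic, within [600, 1200)
print(delay_seconds)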
def ready_for_retry(self):
"""
    Checks whether the task instance is in the right state and timeframe
to be retried.
"""
return (self.state == State.UP_FOR_RETRY and
self.next_retry_datetime() < timezone.utcnow()) | def ready_for_retry(self):
    return (self.state == State.UP_FOR_RETRY and
            self.next_retry_datetime() < timezone.utcnow()) | Checks whether the task instance is in the right state and timeframe
to be retried. |
def pool_full(self, session):
"""
Returns a boolean as to whether the slot pool has room for this
task to run
"""
if not self.task.pool:
return False
pool = (
session
.query(Pool)
.filter(Pool.pool == self.task.pool)
.first()
)
if not pool:
return False
open_slots = pool.open_slots(session=session)
return open_slots <= 0 | def pool_full(self, session):
if not self.task.pool:
return False
pool = (
session
.query(Pool)
.filter(Pool.pool == self.task.pool)
.first()
)
if not pool:
return False
open_slots = pool.open_slots(session=session)
return open_slots <= 0 | Returns a boolean as to whether the slot pool has room for this
task to run |
def get_dagrun(self, session):
"""
Returns the DagRun for this TaskInstance
:param session:
:return: DagRun
"""
from airflow.models.dagrun import DagRun # Avoid circular import
dr = session.query(DagRun).filter(
DagRun.dag_id == self.dag_id,
DagRun.execution_date == self.execution_date
).first()
return dr | def get_dagrun(self, session):
from airflow.models.dagrun import DagRun # Avoid circular import
dr = session.query(DagRun).filter(
DagRun.dag_id == self.dag_id,
DagRun.execution_date == self.execution_date
).first()
return dr | Returns the DagRun for this TaskInstance
:param session:
:return: DagRun |
def xcom_push(
self,
key,
value,
execution_date=None):
"""
Make an XCom available for tasks to pull.
:param key: A key for the XCom
:type key: str
:param value: A value for the XCom. The value is pickled and stored
in the database.
:type value: any pickleable object
:param execution_date: if provided, the XCom will not be visible until
this date. This can be used, for example, to send a message to a
task on a future date without it being immediately visible.
:type execution_date: datetime
"""
if execution_date and execution_date < self.execution_date:
raise ValueError(
'execution_date can not be in the past (current '
'execution_date is {}; received {})'.format(
self.execution_date, execution_date))
XCom.set(
key=key,
value=value,
task_id=self.task_id,
dag_id=self.dag_id,
execution_date=execution_date or self.execution_date) | def xcom_push(
self,
key,
value,
execution_date=None):
if execution_date and execution_date < self.execution_date:
raise ValueError(
'execution_date can not be in the past (current '
'execution_date is {}; received {})'.format(
self.execution_date, execution_date))
XCom.set(
key=key,
value=value,
task_id=self.task_id,
dag_id=self.dag_id,
execution_date=execution_date or self.execution_date) | Make an XCom available for tasks to pull.
:param key: A key for the XCom
:type key: str
:param value: A value for the XCom. The value is pickled and stored
in the database.
:type value: any pickleable object
:param execution_date: if provided, the XCom will not be visible until
this date. This can be used, for example, to send a message to a
task on a future date without it being immediately visible.
:type execution_date: datetime |
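For illustration, a PythonOperator callable pushing an XCom through the task instance exposed in the template context; dag, task, and key names are placeholders.

from airflow import DAG
from airflow.operators.python_operator import PythonOperator
from airflow.utils.dates import days_ago

dag = DAG('xcom_example', start_date=days_ago(1), schedule_interval=None)

def _push_row_count(**context):
    # 'ti' is the TaskInstance running this callable
    context['ti'].xcom_push(key='row_count', value=42)

push_task = PythonOperator(
    task_id='push_row_count',
    python_callable=_push_row_count,
    provide_context=True,  # needed so the callable receives the context kwargs
    dag=dag,
)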
def xcom_pull(
self,
task_ids=None,
dag_id=None,
key=XCOM_RETURN_KEY,
include_prior_dates=False):
"""
Pull XComs that optionally meet certain criteria.
The default value for `key` limits the search to XComs
that were returned by other tasks (as opposed to those that were pushed
manually). To remove this filter, pass key=None (or any desired value).
If a single task_id string is provided, the result is the value of the
most recent matching XCom from that task_id. If multiple task_ids are
provided, a tuple of matching values is returned. None is returned
whenever no matches are found.
:param key: A key for the XCom. If provided, only XComs with matching
keys will be returned. The default key is 'return_value', also
available as a constant XCOM_RETURN_KEY. This key is automatically
given to XComs returned by tasks (as opposed to being pushed
manually). To remove the filter, pass key=None.
:type key: str
:param task_ids: Only XComs from tasks with matching ids will be
pulled. Can pass None to remove the filter.
:type task_ids: str or iterable of strings (representing task_ids)
:param dag_id: If provided, only pulls XComs from this DAG.
If None (default), the DAG of the calling task is used.
:type dag_id: str
:param include_prior_dates: If False, only XComs from the current
execution_date are returned. If True, XComs from previous dates
are returned as well.
:type include_prior_dates: bool
"""
if dag_id is None:
dag_id = self.dag_id
pull_fn = functools.partial(
XCom.get_one,
execution_date=self.execution_date,
key=key,
dag_id=dag_id,
include_prior_dates=include_prior_dates)
if is_container(task_ids):
return tuple(pull_fn(task_id=t) for t in task_ids)
else:
return pull_fn(task_id=task_ids) | def xcom_pull(
self,
task_ids=None,
dag_id=None,
key=XCOM_RETURN_KEY,
include_prior_dates=False):
if dag_id is None:
dag_id = self.dag_id
pull_fn = functools.partial(
XCom.get_one,
execution_date=self.execution_date,
key=key,
dag_id=dag_id,
include_prior_dates=include_prior_dates)
if is_container(task_ids):
return tuple(pull_fn(task_id=t) for t in task_ids)
else:
return pull_fn(task_id=task_ids) | Pull XComs that optionally meet certain criteria.
The default value for `key` limits the search to XComs
that were returned by other tasks (as opposed to those that were pushed
manually). To remove this filter, pass key=None (or any desired value).
If a single task_id string is provided, the result is the value of the
most recent matching XCom from that task_id. If multiple task_ids are
provided, a tuple of matching values is returned. None is returned
whenever no matches are found.
:param key: A key for the XCom. If provided, only XComs with matching
keys will be returned. The default key is 'return_value', also
available as a constant XCOM_RETURN_KEY. This key is automatically
given to XComs returned by tasks (as opposed to being pushed
manually). To remove the filter, pass key=None.
:type key: str
:param task_ids: Only XComs from tasks with matching ids will be
pulled. Can pass None to remove the filter.
:type task_ids: str or iterable of strings (representing task_ids)
:param dag_id: If provided, only pulls XComs from this DAG.
If None (default), the DAG of the calling task is used.
:type dag_id: str
:param include_prior_dates: If False, only XComs from the current
execution_date are returned. If True, XComs from previous dates
are returned as well.
:type include_prior_dates: bool |
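And the matching pull side, a hedged sketch of a downstream callable reading the value pushed in the previous example plus another task's return value; the task ids are placeholders. It would be wired into a PythonOperator with provide_context=True, just like the push example.

def _report_counts(**context):
    ti = context['ti']
    # Explicit key pushed by the 'push_row_count' task in the sketch above
    row_count = ti.xcom_pull(task_ids='push_row_count', key='row_count')
    # Default key 'return_value', pushed automatically by a placeholder 'extract_task'
    extracted = ti.xcom_pull(task_ids='extract_task')
    print(row_count, extracted)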
def close(self):
"""
Close and upload local log file to remote storage Wasb.
"""
    # When the application exits, the system shuts down all handlers by
    # calling their close method. Here we check if the logger is already
    # closed to prevent uploading the log to remote storage multiple
    # times when `logging.shutdown` is called.
if self.closed:
return
super().close()
if not self.upload_on_close:
return
local_loc = os.path.join(self.local_base, self.log_relative_path)
remote_loc = os.path.join(self.remote_base, self.log_relative_path)
if os.path.exists(local_loc):
# read log and remove old logs to get just the latest additions
with open(local_loc, 'r') as logfile:
log = logfile.read()
self.wasb_write(log, remote_loc, append=True)
if self.delete_local_copy:
shutil.rmtree(os.path.dirname(local_loc))
# Mark closed so we don't double write if close is called twice
self.closed = True | def close(self):
    # When the application exits, the system shuts down all handlers by
    # calling their close method. Here we check if the logger is already
    # closed to prevent uploading the log to remote storage multiple
    # times when `logging.shutdown` is called.
if self.closed:
return
super().close()
if not self.upload_on_close:
return
local_loc = os.path.join(self.local_base, self.log_relative_path)
remote_loc = os.path.join(self.remote_base, self.log_relative_path)
if os.path.exists(local_loc):
# read log and remove old logs to get just the latest additions
with open(local_loc, 'r') as logfile:
log = logfile.read()
self.wasb_write(log, remote_loc, append=True)
if self.delete_local_copy:
shutil.rmtree(os.path.dirname(local_loc))
# Mark closed so we don't double write if close is called twice
self.closed = True | Close and upload local log file to remote storage Wasb. |
def get_conn(self):
"""
Retrieves connection to Google Compute Engine.
:return: Google Compute Engine services object
:rtype: dict
"""
if not self._conn:
http_authorized = self._authorize()
self._conn = build('compute', self.api_version,
http=http_authorized, cache_discovery=False)
return self._conn | def get_conn(self):
if not self._conn:
http_authorized = self._authorize()
self._conn = build('compute', self.api_version,
http=http_authorized, cache_discovery=False)
return self._conn | Retrieves connection to Google Compute Engine.
:return: Google Compute Engine services object
:rtype: dict |
def start_instance(self, zone, resource_id, project_id=None):
"""
Starts an existing instance defined by project_id, zone and resource_id.
Must be called with keyword arguments rather than positional.
:param zone: Google Cloud Platform zone where the instance exists
:type zone: str
:param resource_id: Name of the Compute Engine instance resource
:type resource_id: str
:param project_id: Optional, Google Cloud Platform project ID where the
Compute Engine Instance exists. If set to None or missing,
the default project_id from the GCP connection is used.
:type project_id: str
:return: None
"""
response = self.get_conn().instances().start(
project=project_id,
zone=zone,
instance=resource_id
).execute(num_retries=self.num_retries)
try:
operation_name = response["name"]
except KeyError:
raise AirflowException(
"Wrong response '{}' returned - it should contain "
"'name' field".format(response))
self._wait_for_operation_to_complete(project_id=project_id,
operation_name=operation_name,
zone=zone) | def start_instance(self, zone, resource_id, project_id=None):
response = self.get_conn().instances().start(
project=project_id,
zone=zone,
instance=resource_id
).execute(num_retries=self.num_retries)
try:
operation_name = response["name"]
except KeyError:
raise AirflowException(
"Wrong response '{}' returned - it should contain "
"'name' field".format(response))
self._wait_for_operation_to_complete(project_id=project_id,
operation_name=operation_name,
zone=zone) | Starts an existing instance defined by project_id, zone and resource_id.
Must be called with keyword arguments rather than positional.
:param zone: Google Cloud Platform zone where the instance exists
:type zone: str
:param resource_id: Name of the Compute Engine instance resource
:type resource_id: str
:param project_id: Optional, Google Cloud Platform project ID where the
Compute Engine Instance exists. If set to None or missing,
the default project_id from the GCP connection is used.
:type project_id: str
:return: None |
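A hedged sketch of calling this through the contrib Compute Engine hook; the class name, import path, and all ids are assumptions for illustration.

from airflow.contrib.hooks.gcp_compute_hook import GceHook  # import path assumed

hook = GceHook(gcp_conn_id='google_cloud_default', api_version='v1')
hook.start_instance(
    zone='europe-west1-b',           # placeholder zone
    resource_id='example-instance',  # placeholder instance name
    project_id='example-project',    # placeholder; None falls back to the connection default
)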
def set_machine_type(self, zone, resource_id, body, project_id=None):
"""
Sets machine type of an instance defined by project_id, zone and resource_id.
Must be called with keyword arguments rather than positional.
:param zone: Google Cloud Platform zone where the instance exists.
:type zone: str
:param resource_id: Name of the Compute Engine instance resource
:type resource_id: str
:param body: Body required by the Compute Engine setMachineType API,
as described in
https://cloud.google.com/compute/docs/reference/rest/v1/instances/setMachineType
:type body: dict
:param project_id: Optional, Google Cloud Platform project ID where the
Compute Engine Instance exists. If set to None or missing,
the default project_id from the GCP connection is used.
:type project_id: str
:return: None
"""
response = self._execute_set_machine_type(zone, resource_id, body, project_id)
try:
operation_name = response["name"]
except KeyError:
raise AirflowException(
"Wrong response '{}' returned - it should contain "
"'name' field".format(response))
self._wait_for_operation_to_complete(project_id=project_id,
operation_name=operation_name,
zone=zone) | def set_machine_type(self, zone, resource_id, body, project_id=None):
response = self._execute_set_machine_type(zone, resource_id, body, project_id)
try:
operation_name = response["name"]
except KeyError:
raise AirflowException(
"Wrong response '{}' returned - it should contain "
"'name' field".format(response))
self._wait_for_operation_to_complete(project_id=project_id,
operation_name=operation_name,
zone=zone) | Sets machine type of an instance defined by project_id, zone and resource_id.
Must be called with keyword arguments rather than positional.
:param zone: Google Cloud Platform zone where the instance exists.
:type zone: str
:param resource_id: Name of the Compute Engine instance resource
:type resource_id: str
:param body: Body required by the Compute Engine setMachineType API,
as described in
https://cloud.google.com/compute/docs/reference/rest/v1/instances/setMachineType
:type body: dict
:param project_id: Optional, Google Cloud Platform project ID where the
Compute Engine Instance exists. If set to None or missing,
the default project_id from the GCP connection is used.
:type project_id: str
:return: None |
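Reusing the hook from the previous sketch, a hedged example of the body expected by setMachineType; the partial machine type URL follows the API reference linked above, and the instance must be stopped before its type can be changed.

set_machine_type_body = {
    # Partial URL form per the Compute Engine setMachineType API
    'machineType': 'zones/europe-west1-b/machineTypes/n1-standard-4'
}
hook.set_machine_type(
    zone='europe-west1-b',
    resource_id='example-instance',
    body=set_machine_type_body,
    project_id='example-project',
)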
def get_instance_template(self, resource_id, project_id=None):
"""
Retrieves instance template by project_id and resource_id.
Must be called with keyword arguments rather than positional.
:param resource_id: Name of the instance template
:type resource_id: str
:param project_id: Optional, Google Cloud Platform project ID where the
Compute Engine Instance exists. If set to None or missing,
the default project_id from the GCP connection is used.
:type project_id: str
:return: Instance template representation as object according to
https://cloud.google.com/compute/docs/reference/rest/v1/instanceTemplates
:rtype: dict
"""
response = self.get_conn().instanceTemplates().get(
project=project_id,
instanceTemplate=resource_id
).execute(num_retries=self.num_retries)
return response | def get_instance_template(self, resource_id, project_id=None):
response = self.get_conn().instanceTemplates().get(
project=project_id,
instanceTemplate=resource_id
).execute(num_retries=self.num_retries)
return response | Retrieves instance template by project_id and resource_id.
Must be called with keyword arguments rather than positional.
:param resource_id: Name of the instance template
:type resource_id: str
:param project_id: Optional, Google Cloud Platform project ID where the
Compute Engine Instance exists. If set to None or missing,
the default project_id from the GCP connection is used.
:type project_id: str
:return: Instance template representation as object according to
https://cloud.google.com/compute/docs/reference/rest/v1/instanceTemplates
:rtype: dict |
def insert_instance_template(self, body, request_id=None, project_id=None):
"""
    Inserts an instance template using the specified body.
Must be called with keyword arguments rather than positional.
:param body: Instance template representation as object according to
https://cloud.google.com/compute/docs/reference/rest/v1/instanceTemplates
:type body: dict
:param request_id: Optional, unique request_id that you might add to achieve
    full idempotence (for example, when a client call times out, repeating the request
    with the same request_id will not create a new instance template again).
It should be in UUID format as defined in RFC 4122
:type request_id: str
:param project_id: Optional, Google Cloud Platform project ID where the
Compute Engine Instance exists. If set to None or missing,
the default project_id from the GCP connection is used.
:type project_id: str
:return: None
"""
response = self.get_conn().instanceTemplates().insert(
project=project_id,
body=body,
requestId=request_id
).execute(num_retries=self.num_retries)
try:
operation_name = response["name"]
except KeyError:
raise AirflowException(
"Wrong response '{}' returned - it should contain "
"'name' field".format(response))
self._wait_for_operation_to_complete(project_id=project_id,
operation_name=operation_name) | def insert_instance_template(self, body, request_id=None, project_id=None):
response = self.get_conn().instanceTemplates().insert(
project=project_id,
body=body,
requestId=request_id
).execute(num_retries=self.num_retries)
try:
operation_name = response["name"]
except KeyError:
raise AirflowException(
"Wrong response '{}' returned - it should contain "
"'name' field".format(response))
    self._wait_for_operation_to_complete(project_id=project_id,
                                         operation_name=operation_name) | Inserts an instance template using the specified body.
Must be called with keyword arguments rather than positional.
:param body: Instance template representation as object according to
https://cloud.google.com/compute/docs/reference/rest/v1/instanceTemplates
:type body: dict
:param request_id: Optional, unique request_id that you might add to achieve
    full idempotence (for example, when a client call times out, repeating the request
    with the same request_id will not create a new instance template again).
It should be in UUID format as defined in RFC 4122
:type request_id: str
:param project_id: Optional, Google Cloud Platform project ID where the
Compute Engine Instance exists. If set to None or missing,
the default project_id from the GCP connection is used.
:type project_id: str
:return: None |
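Finally, a hedged sketch of inserting a minimal template with the same hook; the body fields follow the instanceTemplates resource linked above and every value is a placeholder.

import uuid

instance_template_body = {
    'name': 'example-template',
    'properties': {
        'machineType': 'n1-standard-1',
        'disks': [{
            'boot': True,
            'autoDelete': True,
            'initializeParams': {
                'sourceImage': 'projects/debian-cloud/global/images/family/debian-9',
            },
        }],
        'networkInterfaces': [{'network': 'global/networks/default'}],
    },
}
hook.insert_instance_template(
    body=instance_template_body,
    request_id=str(uuid.uuid4()),  # optional idempotency key in UUID format
    project_id='example-project',
)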