Dataset columns:
- filename: string (lengths 4 to 198)
- content: string (lengths 25 to 939k)
- environment: sequence
- variablearg: sequence
- constarg: sequence
- variableargjson: string (1 distinct value)
- constargjson: string (lengths 2 to 3.9k)
- lang: string (3 distinct values)
- constargcount: float64 (0 to 129)
- variableargcount: float64 (0 to 0)
- sentence: string (1 distinct value)
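The rows below follow this schema field by field: filename, content, then the environment-variable fields, lang, and the two counts. As a minimal sketch only, assuming the rows were exported as a JSON Lines file named rows.jsonl (that filename and the export step are assumptions, not part of this dump), they could be iterated like so:

import json

# Hypothetical export path; the dump itself does not name a file.
with open("rows.jsonl", encoding="utf-8") as fh:
    for line in fh:
        row = json.loads(line)
        # Field names follow the column list above.
        print(row["filename"], row["lang"], int(row["constargcount"]), row["constarg"])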
dialog_test/flowtest_v1.py
from watson_developer_cloud import ConversationV1 import pandas as pd import re import json import os class FlowTestV1: """ Conversational Flow testing """ anyFails = False templateColList = [ 'User Input', 'Match Output','Match Intent','Match Entity', 'Alternate Intents?','Intents Object','Entities Object','Context Variables','System Object' ] reportColList1 = ['User Input', 'Output Text', 'Match Output','Match Intent','Match Entity', 'Matched Output', 'Matched Intent', 'Matched Entity', 'Matched Context'] reportColIntentList = ['Recall@'] reportColList2 = ['Intent', 'Confidence', 'Entities', 'Alternate Intents', 'Conversation ID', 'Context' ] # Have to do this to sort correctly. reportIntentList = ['Intent1', 'Confidence1', 'Intent2', 'Confidence2', 'Intent3', 'Confidence3', 'Intent4', 'Confidence4', 'Intent5', 'Confidence5', 'Intent6', 'Confidence6', 'Intent7', 'Confidence7', 'Intent8', 'Confidence8', 'Intent9', 'Confidence9', 'Intent10', 'Confidence10' ] def __init__(self, **kwargs): self.ctk = ConversationV1(**kwargs) def reportFailure(self): print("************************* FAIL ****************************") self.anyFails = True def lastTestResult(self): if self.anyFails == False: print("******************** LAST TEST PASSED *********************") else: print("******************** LAST TEST FAILED *********************") self.anyFails = False def createBlankTemplate(self): """ Creates a blank dataframe flow template, that can be used to write your test scripts with. """ df = pd.DataFrame(columns=list(self.templateColList)) return df def createBlankReport(self, alternate_intents=False): """ Creates a blank report dataframe """ df = pd.DataFrame(columns=list(self.reportColList1 + self.reportColList2),) if alternate_intents: df = self.createAlternateIntentsColumns(df) return df def createAlternateIntentsColumns(self, df): if 'Intent1' in df.columns: return df df = df.append(pd.DataFrame(columns=list(self.reportIntentList))) cols = list(self.reportColList1) + list(self.reportColList2) + list(self.reportIntentList) df.columns = cols return df def jsonDumpFlowTest(self,workspace_id=None, flow=None, user_goes_first=False, show_progress=True, alternate_intents=True, version="1.x"): """ This will return an array of JSON records instead of a report :param workspace_id Required. The workspace to test against. :param flow Required. The dataframe containing the test information. """ results = [] context = {} default_intents = alternate_intents if user_goes_first: payload = { 'text': '' } r = self.ctk.message(workspace_id=workspace_id, input=payload, context=context) if version.startswith('2.'): r = r.get_result() context = r['context'] results.append(r) for index, row in flow.iterrows(): # @UnusedVariable if show_progress: print('{} {}'.format(index, row['User Input'])) if row['User Input'] == 'NEWCONVERSATION': context = {} if row['Context Variables'] != '': context.update(row['Context Variables']) if user_goes_first: payload = { 'text': '' } r = self.ctk.message(workspace_id=workspace_id, input=payload, context=context) if version.startswith('2.'): r = r.get_result() context = r['context'] results.append(r) else: if row['Alternate Intents'] == '': ai = default_intents else: ai = row['Alternate Intents'] payload = { 'text' : row['User Input'] } # Build context variables. 
if row['Context Variables'] != '': cv = json.loads(row['Context Variables']) context.update(cv) r = self.ctk.message(workspace_id=workspace_id, input=payload, context=context, alternate_intents=ai) if version.startswith('2.'): r = r.get_result() context = r['context'] results.append(r) return results def runFlowTest(self, workspace_id=None, flow=None,json_dump=False,alternate_intents=True, intent_per_row=False, user_goes_first=False, show_progress=True, version='1.x'): flow = flow.fillna('') if json_dump == True: return self.jsonDumpFlowTest(workspace_id, flow) context = {} default_intents = alternate_intents df = self.createBlankReport() if alternate_intents: df = self.createAlternateIntentsColumns(df) innerText = '' if user_goes_first: payload = { 'text': '' } r = self.ctk.message(workspace_id=workspace_id, input=payload, context=context) if version.startswith('2.'): r = r.get_result() context = r['context'] record = { 'User Input': '', 'Output Text': '\n'.join(r['output']['text']), 'Context': r['context'] } df = df.append(record,ignore_index=True) for index, row in flow.iterrows(): # @UnusedVariable if show_progress: print('{} {}'.format(index, row['User Input'])) if row['User Input'] == 'NEWCONVERSATION': self.lastTestResult() workspace_id=os.environ["WORKSPACE_ID"] context = {} if row['Context Variables'] != '': context.update(row['Context Variables']) if user_goes_first: payload = { 'text': '' } r = self.ctk.message(workspace_id=workspace_id, input=payload, context=context) if version.startswith('2.'): r = r.get_result() context = r['context'] df = df.append({ 'User Input': 'NEWCONVERSATION', 'Context': context },ignore_index=True) else: if row['Alternate Intents'] == '': ai = default_intents else: ai = row['Alternate Intents'] payload = { 'text' : row['User Input'] } # Build context variables. 
if row['Context Variables'] != '': cv = json.loads(row['Context Variables']) context.update(cv) r = self.ctk.message(workspace_id=workspace_id, input=payload, context=context, alternate_intents=ai) if version.startswith('2.'): r = r.get_result() context = r['context'] if 'text' in r['output']: innerText = r['output']['text'] if row['Match Output'] != '': matchedOutput = bool(re.search(row['Match Output'], '\n'.join(innerText))) if matchedOutput == False: self.reportFailure() else: matchedOutput = "n/a" if len(r['intents']) > 0: matchedIntent = bool(re.search(row['Match Intent'], r['intents'][0]['intent'])) if matchedIntent == False: self.reportFailure() else: matchedIntent = "n/a" entity_string = '' if len(r['entities']) > 0: for e in r['entities']: entity_string = u'{} {}:{}{}'.format(entity_string, e['entity'], e['value'], e['location']) if row['Match Entity'] != '': matchedEntity = bool(re.search('.*?{}.*?'.format(row['Match Entity']), entity_string)) if matchedEntity == False: self.reportFailure() else: matchedEntity = "n/a" # If your orchestrator ever changes Watson Assistant workspaces, detect and reflect that here # workspace_id = new_workspace_id # del context['system'] # del context['action'] # print('\tJumped to workspace {}'.format(workspace_id)) record = { 'User Input': row['User Input'], 'Output Text': '\n'.join(r['output']['text']), 'Alternate Intents': ai, 'Conversation ID': r['context']['conversation_id'], 'Context': r['context'], 'Match Output': row['Match Output'], 'Match Intent': row['Match Intent'], 'Match Entity': row['Match Entity'], 'Matched Output': matchedOutput, 'Matched Intent': matchedIntent, 'Matched Entity': matchedEntity, #'Matched Context': matchedContext, 'Entities': entity_string } if ai: df = self.createAlternateIntentsColumns(df) for i in range(0,len(r['intents'])): record.update({ 'Intent{}'.format(i+1): r['intents'][i]['intent'], 'Confidence{}'.format(i+1): r['intents'][i]['confidence'] }) if len(r['intents']) > 0: record.update({ 'Intent': r['intents'][0]['intent'], 'Confidence': r['intents'][0]['confidence'] }) df = df.append(record,ignore_index=True) self.lastTestResult() df = df.fillna('') if intent_per_row: return self.convertReportToIntentPerRow(df) return df def convertReportToIntentPerRow(self, report=None,input_all_lines=True): """ Converts a Report file to row by Recall@ (Intent). Will do nothing if alternate_intents was never set. :param report The Dataframe containing the report """ cols = list(self.reportColList1) + list(self.reportColIntentList) + list(self.reportColList2) df = pd.DataFrame(columns=list(cols)) for index, row in report.iterrows(): # @UnusedVariable record = row for i in range(1,11): if input_all_lines == False and i > 1: record['User Input'] = '' record['Recall@'] = i record['Intent'] = row['Intent{}'.format(i)] record['Confidence'] = row['Confidence{}'.format(i)] # Match against current. if record['Match Intent'] != '': record['Matched Intent'] = bool(re.search(row['Match Intent'], record['Intent{}'.format(i)])) else: record['Matched Intent'] = False df = df.append(record, ignore_index=True) for i in range(1,11): df = df.drop('Intent{}'.format(i),1) df = df.drop('Confidence{}'.format(i),1) return df
[]
[]
[ "WORKSPACE_ID" ]
[]
["WORKSPACE_ID"]
python
1
0
_archive_Flask_AWS/SOUNDDRIP/models/predict.py
import spotipy from sklearn.neighbors import NearestNeighbors from sklearn.preprocessing import MinMaxScaler, Normalizer import pandas as pd from pandas.io.json import json_normalize from joblib import load, dump import pickle import numpy as np import psycopg2 as ps from misc.env_vars import * import sys from more_itertools import unique_everseen import os #importing global environment variables from Elastic Beanstalk os.environ.get('FLASK_ENV') #setting appropriate database table if FLASK_ENV == 'production': db_table = 'recommendations' elif FLASK_ENV == 'development': db_table = 'recommendations_dev' class Sound_Drip: ''' The Sound_Drip object contains all of the code necessary to output automated results via the flask endpoint specified in applications.py Rather than calling each method individually, object instantiation sequentially produces all necessary steps for the final prediction output for the endpoint. A separate child class called Slider can be found below addressing the slider functionality allowing users to receive song predictions by sending the endpoint acoustical features. As of March 2020, this feature has yet to be implemented. init Attributes ---------- token : string, passed in from POST request as JSON object. See appliction.py for more details for initiating object instance. The is the session user token passed to Node upon user sign in. sp : object Spotipy module object used throughout the Class to communicate directly with Spotify's API. user_id : string Spotify user id of user display_name : string Spotify display name of user stale_seed_list : list List containing tracks that the Sound Drip application already used to make a prediction. stale_results_list : list List containing tracks that the Sound Drip application already provided as song recommendations. song_id : string Spotify song ID of the seed used to output the prediction source_genre: list Spotify list containing genre/s from the seed used to output the prediction acoustical_features: dictionary Contains list of all relevant acoustical features from the Spotify API for the seed track popularity: integer Integer indicating popularity attribute of seed track from Spotify API song_features_df : pandas dataframe 1 dimension dataframe containing all relevant features of the seed track for making the prediction results: list of integers List of results from the ML model inference. Results are in the form of indexes corresponding to the master song list dataframe. filtered_list : list of integers List of results filtered by genre matching against the source_genre attribute. Results are in the form of indexes corresponding to the master song list dataframe. song_id_predictions : list of strings List of 20 song ids corresponding to the index list passed in from the filtered_list attribute. 
inser_user_predictions() : class method see class method definition for details ''' def __init__(self, token): self.token = token self.sp = spotipy.Spotify(auth=self.token) self.user_id, self.display_name = self.get_user_ids() self.stale_seed_list = self.get_stale_seed() self.stale_results_list = self.get_stale_results() self.song_id, self.source_genre = self.get_user_song_id_source_genre() self.acoustical_features = self.get_acoustical_features(self.song_id) self.popularity = self.get_popularity(self.song_id) self.song_features_df = self.create_feature_object( self.popularity, self.acoustical_features) self.results = self.get_results(self.song_features_df) self.filtered_list = self.filter_model(self.results, self.source_genre) self.song_id_predictions = self.song_id_prediction_output( self.filtered_list) self.insert_user_predictions(), print("predicts inserted into db") def get_user_ids(self): ''' Retrieves user id from Spotfiy API Returns user_id, and display_name (display_name is for the database) ''' current_user_dict = self.sp.current_user() display_name = current_user_dict['display_name'] user_id = current_user_dict['id'] print("retrieving user id and display name for current token") return user_id, display_name def get_user_song_id_source_genre(self): ''' Retrieves current user's last 50 liked tracks from Spotify and loops through all tracks to find track not previously used for prediction. Tracks that do not contain a genre in the API (= []) are also skipped. If all retrieved liked tracks have already been used for the prediction, the most recently liked track is used as the seed for the application. Returns seed track song_id and genre to be used in prediction. ''' stale_songs = self.stale_seed_list results = self.sp.current_user_saved_tracks(limit=50) for song_number in range(0, len(results['items'])): print(song_number) song_id = results['items'][song_number]['track']['id'] print(song_id) if song_id not in stale_songs: artist_id = self.get_artist_id(song_id) genre = self.get_genres(artist_id) print(genre) if genre != []: break else: continue else: if song_number == len(results['items']) - 1: print("application out of fresh seeds") for song_id in stale_songs: artist_id = self.get_artist_id(song_id) genre = self.get_genres(artist_id) if genre != []: break else: continue return song_id, genre def get_acoustical_features(self, song_id): ''' Retrieves the acousitcal features of the song_id passed in. Song_id is technically defined outside of the method. Song_id is pulled from the spotify API Returns a dictionary object of with key, val for each feature ''' acoustical_features = self.sp.audio_features(song_id)[0] return acoustical_features def get_popularity(self, song_id): ''' Retrieves popularity score from Spotfiy API ''' popularity = self.sp.track(song_id)['popularity'] return popularity def get_artist_id(self, song_id): ''' Retrieves artist id from Spotify API. ''' artist = self.sp.track(song_id)['artists'][0]['id'] return artist def get_genres(self, artist): ''' Retrieves genre of artist from spotify API. ''' genre = self.sp.artist(artist)['genres'] return genre def create_feature_object(self, popularity, acoustical_features): ''' Combines previously retrieved features into the dictionary format needed for the get_results method. Features are sorted in alphabetical order before being returned to match the order of the features that the model was trained on. 
''' popularity_dict = {'popularity': popularity} song_features = acoustical_features song_features.update(popularity_dict) song_features = { "audio_features": { key: song_features[key] for key in song_features.keys() & { 'popularity', 'acousticness', 'danceability', 'energy', 'instrumentalness', 'key', 'liveness', 'loudness', 'mode', 'speechiness', 'tempo', 'time_signature', 'valence'}}} df = pd.DataFrame.from_dict( json_normalize( song_features["audio_features"]), orient='columns') df = df.reindex(sorted(df.columns), axis=1) return df def get_results(self, song_features_df): ''' 1) Loads in previously fitted scaler (MinMax). 1a) MinMax scalar import is the scaler in question 2) Song_features_df is scaled. 3) Scaled data is normalized 4) Previously trained KNN model is loaded 5) Predictions are made on the normalized array from the song features 6) Returns results object (5000 nearest neighbors from KNN model are returned) ''' scaler = load("./models/scalar3.joblib") print('Scaling data...') data_scaled = scaler.transform(song_features_df) normalizer = Normalizer() data_normalized = normalizer.fit_transform(data_scaled) print('Loading pickled model...') model = load('./models/model5.joblib') results = model.kneighbors([data_normalized][0])[1:] print('results returned') return results[0] def filter_model(self, model_results, source_genre_list): ''' 1) Load in genres array 1a) The genres array is a numpy array object which contains a list of genres in corresponding order to the song_list df 2) Raw model_results are loaded 3) Raw model_results are filtered through stale_results 3a) This is to ensure that the filter does not produce redundant predictions 4) Loops through output_song_index 4a) Output_song_index is member of model_results 5) Loops through output_genre_list 5a) Output_genre_list is returned from genre_array with output_song_index 6) Loops through source_genre_list 6a) Source_genre_list is a necessary **arg for this method 7) Condition for matching output_genre to source_genre 7a) if the condition is met, then the output_song_index is appended to the filtered list 8) Duplicates removed from filtered_list 9) 'if' statement is initiated 9a) Song_list_length specifies ultimate amount of predictions from application 10) If filtered list greater than or equal to pre-defined song_list length, the method returns the filtered_list 11) If filtered list is less than pre-defined song_list length, tracks from model_results that do not match the source_genre are appended to filtered_list 11a) This is done by looping through songs within the output_song_index and appending to the filtered_list 11b) Song_list length is being used here to ensure that the final list is returned appropriately 12) Returns filtered_list ''' # loop takes KNN results and filters by source track genres print(source_genre_list) print("filter for genres initiated") genre_array = pickle.load(open("./data/genres_array_2.pkl", "rb")) filtered_list = [] song_list_length = 20 stale_results = self.stale_results_list model_results_before = len(model_results[0][1:]) model_results = [index for index in model_results[0] [1:] if index not in stale_results] model_results_final = model_results_before - len(model_results) print(f'{model_results_final} stale tracks were removed for the user') for output_song_index in model_results: output_genre_list = genre_array[output_song_index] for output_genre in output_genre_list: output_genre = output_genre.strip(" ") for source_genre in source_genre_list: source_genre = "'" + source_genre + "'" 
if source_genre == output_genre: filtered_list.append(output_song_index) else: continue filtered_list = list(unique_everseen(filtered_list)) if len(filtered_list) >= song_list_length: print("filter found at least 20 genre matches") filtered_list = filtered_list[0:20] else: counter = song_list_length - len(filtered_list) print("length of filtered list:", len(filtered_list)) print(f'need to add {counter} items to final song output') for output_song_index in model_results: if output_song_index not in filtered_list: if counter > 0: filtered_list.append(output_song_index) counter -= 1 else: break print( f"filtered list with {len(filtered_list)} unique song indices returned") return filtered_list def song_id_prediction_output(self, filtered_list): ''' Retrieving the corresponding spotify song id's and outputting correct format for endpoint Returns song_result_output_dict for global endpoint, and song_id_and_index_dict for database method ''' similar_songs = [] song_id_list = [] print('song_id_list loading...') song_id_array = pickle.load(open('./data/song_id_array3.pkl', 'rb')) print('song_id_list loaded') for song_row in filtered_list: song_id = song_id_array[song_row] similar_songs.append({'similarity': [.99], 'values': song_id}) song_id_list.append(song_id) song_result_output_dict = {"songs": similar_songs} song_id_and_index_dict = { song_id: song_index for song_id, song_index in zip( song_id_list, filtered_list)} print("Results returned") return song_result_output_dict, song_id_and_index_dict def db_connect(self): ''' Method for opening a connection to db and creating cursor object ''' conn = ps.connect(host=POSTGRES_ADDRESS, database=POSTGRES_DBNAME, user=POSTGRES_USERNAME, password=POSTGRES_PASSWORD, port=POSTGRES_PORT) cur = conn.cursor() return conn, cur # def get_user_ids(self): # ''' # Retrieves user id from Spotfiy API # Returns user_id, and display_name (display_name is for the database) # ''' # current_user_dict = self.sp.current_user() # display_name = current_user_dict['display_name'] # user_id = current_user_dict['id'] # print("retrieving user id and display name for current token") # return user_id, display_name def insert_user_predictions(self): ''' Loops through song_id_predictions, inserting song_id, user information and song_index into db ''' try: conn, cur = self.db_connect() for song_id, song_index in self.song_id_predictions[1].items(): cur.execute( f'INSERT INTO {db_table}' '(userid,songid,songlistindex,seedsongid,recdate)' f' VALUES (\'{self.user_id}\',\'{song_id}\',\'{song_index}\',\'{self.song_id}\',current_timestamp);') conn.commit() conn.close() except ps.DatabaseError as e: print(f'Error {e}') sys.exit(1) finally: if conn: conn.close() def get_stale_results(self): ''' Retrieves indices corresponding to the master song list of tracks that have already been recommended to the specific user previously ''' try: conn, cur = self.db_connect() query = f'SELECT DISTINCT (songlistindex) FROM {db_table} WHERE userid = \'{self.user_id}\';' cur.execute(query) query_results = cur.fetchall() stale_results_list = [index[0] for index in query_results] except ps.DatabaseError as e: print(f'Error {e}') sys.exit(1) finally: if conn: conn.close() return stale_results_list def get_stale_seed(self): try: conn,cur = self.db_connect() query = f'SELECT DISTINCT (seedsongid) FROM {db_table} WHERE userid = \'{self.user_id}\' AND seedsongid is not null;' cur.execute(query) query_results = cur.fetchall() stale_results_list = [index[0] for index in query_results] except ps.DatabaseError as e: 
print(f'Error {e}') sys.exit(1) finally: if conn: conn.close() return stale_results_list class Slider(Sound_Drip): ''' A child class of the Sound_Drip parent class. Slider allows users to receive ong predictions by sending the endpoint acoustical features. As of March 2020, this feature has yet to be implemented. init Attributes ---------- slider_features = dictionary object features received from external POST request to the endpoint slider_features_df = pandas dataframe object converted to correct format for get_slider_results() method slider_results_list = list list with indices corresponding to dataframe song_ids as results slider_predictions = dictionary slider endpoint return object with Spotify API track ids ---------- ''' def __init__(self, slider_features): self.slider_features = slider_features self.slider_features_df = self.create_slider_feature_df( slider_features) self.slider_results_list = self.get_slider_results( self.slider_features_df)[0][0:20] self.slider_predictions = self.song_id_prediction_output( self.slider_results_list) def create_slider_feature_df(self, slider_features): df = pd.DataFrame.from_dict( json_normalize( self.slider_features["audio_features"]), orient='columns') df = df.reindex(sorted(df.columns), axis=1) return df def get_slider_results(self, song_features_df): scaler = load("./models/scalar3.joblib") print('Scaling data...') data_scaled = scaler.transform(song_features_df) normalizer = Normalizer() data_normalized = normalizer.fit_transform(data_scaled) print('Loading pickled model...') model = load('./models/slider_model6.joblib') results = model.kneighbors([data_normalized][0])[1:] print('results returned') return results[0]
[]
[]
[ "FLASK_ENV" ]
[]
["FLASK_ENV"]
python
1
0
lldb/packages/Python/lldbsuite/test/lldbpexpect.py
from __future__ import absolute_import

# System modules
import os
import sys

# Third-party modules
import six

# LLDB Modules
import lldb
from .lldbtest import *
from . import lldbutil
from lldbsuite.test.decorators import *


@skipIfRemote
@skipIfWindows  # llvm.org/pr22274: need a pexpect replacement for windows
class PExpectTest(TestBase):

    NO_DEBUG_INFO_TESTCASE = True
    PROMPT = "(lldb) "

    def expect_prompt(self):
        self.child.expect_exact(self.PROMPT)

    def launch(self, executable=None, extra_args=None, timeout=60, dimensions=None):
        logfile = getattr(sys.stdout, 'buffer', sys.stdout) if self.TraceOn() else None

        args = ['--no-lldbinit', '--no-use-colors']
        for cmd in self.setUpCommands():
            args += ['-O', cmd]
        if executable is not None:
            args += ['--file', executable]
        if extra_args is not None:
            args.extend(extra_args)

        env = dict(os.environ)
        env["TERM"] = "vt100"

        import pexpect
        self.child = pexpect.spawn(
            lldbtest_config.lldbExec, args=args, logfile=logfile,
            timeout=timeout, dimensions=dimensions, env=env)
        self.expect_prompt()
        for cmd in self.setUpCommands():
            self.child.expect_exact(cmd)
            self.expect_prompt()
        if executable is not None:
            self.child.expect_exact("target create")
            self.child.expect_exact("Current executable set to")
            self.expect_prompt()

    def expect(self, cmd, substrs=None):
        self.assertNotIn('\n', cmd)
        # If 'substrs' is a string then this code would just check that every
        # character of the string is in the output.
        assert not isinstance(substrs, six.string_types), \
            "substrs must be a collection of strings"

        self.child.sendline(cmd)
        if substrs is not None:
            for s in substrs:
                self.child.expect_exact(s)
        self.expect_prompt()

    def quit(self, gracefully=True):
        self.child.sendeof()
        self.child.close(force=not gracefully)
        self.child = None

    def cursor_forward_escape_seq(self, chars_to_move):
        """
        Returns the escape sequence to move the cursor forward/right
        by a certain amount of characters.
        """
        return b"\x1b\[" + str(chars_to_move).encode("utf-8") + b"C"
[]
[]
[]
[]
[]
python
0
0
avrit_backend/avrit_backend/wsgi.py
""" WSGI config for avrit_backend project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'avrit_backend.settings') application = get_wsgi_application()
[]
[]
[]
[]
[]
python
0
0
src/test/java/scaffolding/MvnRunner.java
package scaffolding; import static java.util.Arrays.asList; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.core.IsNot.not; import java.io.File; import java.io.FileFilter; import java.io.IOException; import java.io.PrintStream; import java.util.Collections; import java.util.List; import java.util.Properties; import org.apache.commons.io.filefilter.DirectoryFileFilter; import org.apache.maven.shared.invoker.DefaultInvocationRequest; import org.apache.maven.shared.invoker.DefaultInvoker; import org.apache.maven.shared.invoker.InvocationRequest; import org.apache.maven.shared.invoker.InvocationResult; import org.apache.maven.shared.invoker.Invoker; import org.apache.maven.shared.invoker.MavenInvocationException; import org.apache.maven.shared.invoker.PrintStreamHandler; public class MvnRunner { private static boolean haveInstalledPlugin = false; private final File mvnHome; public boolean logToStandardOut = false; public MvnRunner() { this(null); } public MvnRunner(File mvnHome) { this.mvnHome = mvnHome; } public static MvnRunner mvn(String version) { System.out.println("Ensuring maven " + version + " is available"); MvnRunner mvnRunner = new MvnRunner(); String dirWithMavens = "target/mavens/" + version; mvnRunner.runMaven(new File("."), "-Dartifact=org.apache.maven:apache-maven:" + version + ":zip:bin", "-DmarkersDirectory=" + dirWithMavens, "-DoutputDirectory=" + dirWithMavens, "org.apache.maven.plugins:maven-dependency-plugin:2.10:unpack"); File mvnHome = new File(dirWithMavens).listFiles((FileFilter) DirectoryFileFilter.INSTANCE)[0]; System.out.println("Maven " + version + " available at " + mvnHome.getAbsolutePath()); return new MvnRunner(mvnHome); } public static void installReleasePluginToLocalRepo() { if (haveInstalledPlugin) { return; } long start = System.currentTimeMillis(); System.out.print("Installing the plugin into the local repo .. 
"); assertThat("Environment variable M2_HOME must be set", systemMavenHome() != null); MvnRunner mvnRunner = new MvnRunner(); try { mvnRunner.runMaven(new File("."), "-DskipTests=true -Pcoverage -e", "install"); } catch (MavenExecutionException mee) { System.err.println("caught mee: " + mee.toString()); throw new RuntimeException("run maven failed", mee); } System.out.println( " installed the plugin into the local repo in " + (System.currentTimeMillis() - start) + "ms"); haveInstalledPlugin = true; } public static String systemMavenHome() { return System.getenv("M2_HOME"); } public static void assertArtifactInLocalRepo(String groupId, String artifactId, String version) throws IOException, MavenInvocationException { assertThat("Could not find artifact " + artifactId + " in repository", artifactInLocalRepo(groupId, artifactId, version), is(0)); } public static void assertArtifactNotInLocalRepo(String groupId, String artifactId, String version) throws IOException, MavenInvocationException { assertThat("Found artifact " + artifactId + " in repository", artifactInLocalRepo(groupId, artifactId, version), not(is(0))); } private static int artifactInLocalRepo(String groupId, String artifactId, String version) throws IOException, MavenInvocationException { String artifact = groupId + ":" + artifactId + ":" + version + ":pom"; File temp = new File("target/downloads/" + RandomNameGenerator.getInstance().randomName()); InvocationRequest request = new DefaultInvocationRequest(); request.setGoals(Collections.singletonList("org.apache.maven.plugins:maven-dependency-plugin:2.8:copy")); Properties props = new Properties(); props.setProperty("artifact", artifact); props.setProperty("outputDirectory", temp.getCanonicalPath()); request.setProperties(props); Invoker invoker = new DefaultInvoker(); CollectingLogOutputStream logOutput = new CollectingLogOutputStream(false); invoker.setOutputHandler(new PrintStreamHandler(new PrintStream(logOutput), true)); return invoker.execute(request).getExitCode(); } public List<String> runMaven(File workingDir, String... arguments) { InvocationRequest request = new DefaultInvocationRequest(); request.setGoals(asList(arguments)); request.setBaseDirectory(workingDir); request.setDebug(false); request.setShowErrors(false); Invoker invoker = new DefaultInvoker(); invoker.setMavenHome(mvnHome); CollectingLogOutputStream logOutput = new CollectingLogOutputStream(logToStandardOut); invoker.setOutputHandler(new PrintStreamHandler(new PrintStream(logOutput), true)); //invoker.setErrorHandler(new PrintStreamHandler(new PrintStream(logOutput), true)); int exitCode; try { InvocationResult result = invoker.execute(request); exitCode = result.getExitCode(); } catch (Exception e) { throw new MavenExecutionException(1, logOutput.getLines()); } List<String> output = logOutput.getLines(); if (exitCode != 0) { throw new MavenExecutionException(exitCode, output); } return output; } }
[ "\"M2_HOME\"" ]
[]
[ "M2_HOME" ]
[]
["M2_HOME"]
java
1
0
tests/test_orm.py
import unittest
import os

from orm import ORM
from orm import model
from orm import fields


class User(model.Model):
    first_name = fields.CharField(max_length=40, blank=True)
    last_name = fields.CharField(max_length=40, blank=True)
    username = fields.CharField(max_length=40)
    password = fields.CharField(max_length=40)


testDBpath = "test.db"


class ormTest(unittest.TestCase):

    def setUp(self):
        if os.path.isfile(testDBpath):
            os.remove(testDBpath)
        self.o = ORM(os.getenv('DATABASE_URL', "sqlite://local.db"))
        if self.o.db.db == "postgres":
            self.o.db.cursor.execute("drop schema public cascade")
            self.o.db.cursor.execute("create schema public")

    def tearDown(self):
        if os.path.isfile(testDBpath):
            os.remove(testDBpath)

    def testCreateDB(self):
        if self.o.db.db == "sqlite":
            self.assertTrue(os.path.isfile(testDBpath))

    def testOrmInit(self):
        self.assertEqual(self.o.db.db is not None, True)

    def testRegisterModel(self):
        self.o.registerModel(User)
        self.assertEqual(len(self.o.models), 1)
        self.assertTrue(self.o.initTables())
        if self.o.db == "sqlite":
            r = self.o.db.query("SELECT name FROM sqlite_master WHERE type='table' AND name='users'")
            self.assertEqual(r.fetchone(), {"name": "users"})
        self.o.db.query("SELECT * FROM users")
        self.assertEqual(set([x[0] for x in self.o.db.cursor.description]),
                         set(['first_name', 'last_name', 'password', 'pk', 'username']))


if __name__ == '__main__':
    unittest.main()
[]
[]
[ "DATABASE_URL" ]
[]
["DATABASE_URL"]
python
1
0
config/default.py
''' Config Proto '''

import sys
import os

####### INPUT OUTPUT #######

# debug
isDebug = True

# The name of the current model for output
name = 'default'

# The folder to save log and model
log_base_dir = './log/'

# Whether to save the model checkpoints and result logs
save_model = True

# The interval between writing summary
summary_interval = 100

# Prefix to the image files
# data_prefix = os.environ["DATABASES2"] + "/caricature/WebCaricature/webcaric_5ptaligned_sc0.7_256/"
data_prefix = "./data/webcaricacture_aligned_256/"

# Training data list
train_dataset_path = "./data/train.txt"

# Test data list
test_dataset_path = "./data/test.txt"

# Target image size (h,w) for the input of network
image_size = (256, 256)

# 3 channels means RGB, 1 channel for grayscale
channels = 3

# Preprocess for training
preprocess_train = [
    ['random_flip'],
    ['standardize', 'mean_scale'],
]

# Preprocess for testing
preprocess_test = [
    ['standardize', 'mean_scale'],
]

# Number of GPUs
num_gpus = 1

####### NETWORK #######

# The network architecture
network = 'models/default.py'

# Dimensionality of the bottleneck layer in discriminator
bottleneck_size = 512

# Dimensionality of the style space
style_size = 8

####### TRAINING STRATEGY #######

# Optimizer
optimizer = ("ADAM", {'beta1': 0.5, 'beta2': 0.9})
# optimizer = ("MOM", {'momentum': 0.9})

# Number of samples per batch
batch_size = 2

# Number of batches per epoch
epoch_size = 5000

# Number of epochs
num_epochs = 20

# learning rate strategy
learning_rate_strategy = 'linear'

# learning rate schedule
lr = 0.0001
learning_rate_schedule = {
    'initial': 1 * lr,
    'start': 50000,
    'end_step': 100000,
}

# Restore model
restore_model = ""  # "pretrained/discriminator_casia_256/"

# Keywords to filter restore variables, set None for all
restore_scopes = ['Discriminator/conv', 'Discriminator/Bot']

# Weight decay for model variables
weight_decay = 1e-4

# Keep probability for dropouts
keep_prob = 1.0

####### LOSS FUNCTION #######

# Weight of the global adversarial loss
coef_adv = 1.0

# Weight of the patch adversarial loss
coef_patch_adv = 2.0

# Weight of the identity mapping loss
coef_idt = 10.0
[]
[]
[ "DATABASES2" ]
[]
["DATABASES2"]
python
1
0
mathics/settings.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import pkg_resources import sys import os from os import path DEBUG = True TEMPLATE_DEBUG = DEBUG # set only to True in DEBUG mode DEBUG_MAIL = True PROPAGATE_EXCEPTIONS = True DISPLAY_EXCEPTIONS = True DEBUG_PRINT = False LOG_QUERIES = False # Either None (no timeout) or a positive integer. # unix only TIMEOUT = None # specifies a maximum recursion depth is safe for all Python environments # without setting a custom thread stack size. DEFAULT_MAX_RECURSION_DEPTH = 512 # max pickle.dumps() size for storing results in DB # historically 10000 was used on public mathics servers MAX_STORED_SIZE = 10000 ADMINS = ( ('Admin', '[email protected]'), ) MANAGERS = ADMINS ROOT_DIR = pkg_resources.resource_filename('mathics', '') if sys.platform.startswith('win'): DATA_DIR = os.environ['APPDATA'].replace(os.sep, '/') + '/Python/Mathics/' else: DATA_DIR = path.expanduser('~/.local/var/mathics/') # if not path.exists(DATA_DIR): # os.makedirs(DATA_DIR) DOC_DIR = os.path.join(ROOT_DIR, 'doc/documentation/') DOC_TEX_DATA = os.path.join(ROOT_DIR, 'doc/tex/data') DOC_XML_DATA = os.path.join(ROOT_DIR, 'doc/xml/data') DOC_LATEX_FILE = os.path.join(ROOT_DIR, 'doc/tex/documentation.tex') DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': DATA_DIR + 'mathics.sqlite' } } REQUIRE_LOGIN = False # if REQUIRE_LOGIN is True be sure to set up an email sender: EMAIL_HOST = 'smtp.sendgrid.net' EMAIL_HOST_USER = 'mathics' EMAIL_HOST_PASSWORD = '' EMAIL_PORT = 587 EMAIL_USE_TLS = True # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE = 'Europe/Vienna' # Set this True if you prefer 12 hour time to be the default TIME_12HOUR = False # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # Leave this True unless you have specific reason for not permitting # users to access local files ENABLE_FILES_MODULE = True # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # Absolute path to the directory that holds static files. STATIC_ROOT = os.path.join(ROOT_DIR, 'web/media/') # URL that handles the media served from STATIC_ROOT. STATIC_URL = '/media/' # Make this unique, and don't share it with anybody. SECRET_KEY = 'uvbhuiasaeaph6Duh)r@3ex1i@et=0j4h(!p4@!r6s-=a_ev*e' # List of callables that know how to import templates from various sources. # TEMPLATE_LOADERS = ( # 'django.template.loaders.filesystem.load_template_source', # 'django.template.loaders.app_directories.load_template_source', # ) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', ) ROOT_URLCONF = 'mathics.urls' default_pymathics_modules = ["pymathics.natlang",] TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ os.path.join(ROOT_DIR, 'web/templates/') ], } ] AUTHENTICATION_BACKENDS = ( 'mathics.web.authentication.EmailModelBackend', ) INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'mathics.web', )
[]
[]
[ "APPDATA" ]
[]
["APPDATA"]
python
1
0
neptune_load/bulk_load_data.py
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0

from neptune_load.sigv4_signer.sigv4_signer import SigV4Signer
from neptune_load.bulk_loader.bulk_loader import BulkLoader
import logging
import os
import sys

logger = logging.getLogger("bulk_load")
logger.setLevel(logging.INFO)


def kill_all_active(loader: BulkLoader):
    loads = loader.get_active_loads()
    logger.info(f"Loading {loads}")
    for load in loads:
        loader._load_id = load
        try:
            loader.cancel_load()
        except Exception as e:
            logger.warn(f"Failed to cancel {load} {e}")
    loader._load_id = None
    return


if __name__ == "__main__":
    # parse_input_and_query_neptune()
    host = f'{os.getenv("NEPTUNE_ENDPOINT")}:8182'
    source_bucket = os.getenv("S3_BUCKET")
    loader_role = os.getenv("NEPTUNE_LOADER_IAM_ROLE")
    region = os.getenv("SERVICE_REGION")
    file_name = os.getenv("TRIPLE_NAME")
    source_string = f"s3://{source_bucket}/{file_name}"
    signer = SigV4Signer()
    loader = BulkLoader(
        signer=signer,
        iam_role_arn=loader_role,
        region=region,
        source=source_string,
        neptune_endpoint=host,
    )
    loads = loader.get_active_loads()
    logger.info(f"Loading {loads}")
    kill_all_active(loader)
    try:
        loader.wait_for_bulk_load_from_s3()
    except KeyboardInterrupt as ke:
        logger.info(f"Cancellation requested")
        loader.cancel_load()
        logger.info(f"Final status \n {loader.status.raw}")
        sys.exit()
    logger.info(f"Load complete")
    logger.info(f"Results {loader.status.raw}")
[]
[]
[ "NEPTUNE_LOADER_IAM_ROLE", "S3_BUCKET", "NEPTUNE_ENDPOINT", "TRIPLE_NAME", "SERVICE_REGION" ]
[]
["NEPTUNE_LOADER_IAM_ROLE", "S3_BUCKET", "NEPTUNE_ENDPOINT", "TRIPLE_NAME", "SERVICE_REGION"]
python
5
0
spanner/spanner_snippets/spanner/integration_test.go
// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package spanner import ( "bytes" "context" "fmt" "io" "os" "regexp" "strconv" "strings" "testing" "time" "cloud.google.com/go/spanner" database "cloud.google.com/go/spanner/admin/database/apiv1" instance "cloud.google.com/go/spanner/admin/instance/apiv1" "github.com/GoogleCloudPlatform/golang-samples/internal/testutil" "github.com/google/uuid" "google.golang.org/api/iterator" adminpb "google.golang.org/genproto/googleapis/spanner/admin/database/v1" instancepb "google.golang.org/genproto/googleapis/spanner/admin/instance/v1" "google.golang.org/grpc/codes" ) type sampleFunc func(w io.Writer, dbName string) error type instanceSampleFunc func(w io.Writer, projectID, instanceID string) error type backupSampleFunc func(w io.Writer, dbName, backupID string) error var ( validInstancePattern = regexp.MustCompile("^projects/(?P<project>[^/]+)/instances/(?P<instance>[^/]+)$") ) func initTest(t *testing.T, id string) (dbName string, cleanup func()) { instance := getInstance(t) dbID := validLength(fmt.Sprintf("smpl-%s", id), t) dbName = fmt.Sprintf("%s/databases/%s", instance, dbID) ctx := context.Background() adminClient, err := database.NewDatabaseAdminClient(ctx) if err != nil { t.Fatalf("failed to create DB admin client: %v", err) } // Check for database existance prior to test start and delete, as resources // may not have been cleaned up from previous invocations. if db, err := adminClient.GetDatabase(ctx, &adminpb.GetDatabaseRequest{Name: dbName}); err == nil { t.Logf("database %s exists in state %s. delete result: %v", db.GetName(), db.GetState().String(), adminClient.DropDatabase(ctx, &adminpb.DropDatabaseRequest{Database: dbName})) } cleanup = func() { testutil.Retry(t, 10, time.Second, func(r *testutil.R) { err := adminClient.DropDatabase(ctx, &adminpb.DropDatabaseRequest{Database: dbName}) if err != nil { r.Errorf("DropDatabase(%q): %v", dbName, err) } }) adminClient.Close() } return } func initBackupTest(t *testing.T, id, dbName string) (restoreDBName, backupID, cancelledBackupID string, cleanup func()) { instance := getInstance(t) restoreDatabaseID := validLength(fmt.Sprintf("restore-%s", id), t) restoreDBName = fmt.Sprintf("%s/databases/%s", instance, restoreDatabaseID) backupID = validLength(fmt.Sprintf("backup-%s", id), t) cancelledBackupID = validLength(fmt.Sprintf("cancel-%s", id), t) ctx := context.Background() adminClient, err := database.NewDatabaseAdminClient(ctx) if err != nil { t.Fatalf("failed to create admin client: %v", err) } if db, err := adminClient.GetDatabase(ctx, &adminpb.GetDatabaseRequest{Name: restoreDBName}); err == nil { t.Logf("database %s exists in state %s. 
delete result: %v", db.GetName(), db.GetState().String(), adminClient.DropDatabase(ctx, &adminpb.DropDatabaseRequest{Database: restoreDBName})) } // Check for any backups that were created from that database and delete those as well iter := adminClient.ListBackups(ctx, &adminpb.ListBackupsRequest{ Parent: instance, Filter: "database:" + dbName, }) for { resp, err := iter.Next() if err == iterator.Done { break } if err != nil { t.Errorf("Failed to list backups for database %s: %v", dbName, err) } t.Logf("backup %s exists. delete result: %v", resp.Name, adminClient.DeleteBackup(ctx, &adminpb.DeleteBackupRequest{Name: resp.Name})) } cleanup = func() { testutil.Retry(t, 10, time.Second, func(r *testutil.R) { err := adminClient.DropDatabase(ctx, &adminpb.DropDatabaseRequest{Database: restoreDBName}) if err != nil { r.Errorf("DropDatabase(%q): %v", restoreDBName, err) } }) } return } func TestCreateInstance(t *testing.T) { _ = testutil.SystemTest(t) projectID, _, err := parseInstanceName(getInstance(t)) if err != nil { t.Fatalf("failed to parse instance name: %v", err) } instanceID := fmt.Sprintf("go-sample-test-%s", uuid.New().String()[:8]) out := runInstanceSample(t, createInstance, projectID, instanceID, "failed to create an instance") if err := cleanupInstance(projectID, instanceID); err != nil { t.Logf("cleanupInstance error: %s", err) } assertContains(t, out, fmt.Sprintf("Created instance [%s]", instanceID)) } func TestSample(t *testing.T) { _ = testutil.SystemTest(t) dbName, cleanup := initTest(t, randomID()) defer cleanup() var out string mustRunSample(t, createDatabase, dbName, "failed to create a database") runSample(t, createClients, dbName, "failed to create clients") runSample(t, write, dbName, "failed to insert data") runSample(t, addNewColumn, dbName, "failed to add new column") runSample(t, delete, dbName, "failed to delete data") runSample(t, write, dbName, "failed to insert data") runSample(t, update, dbName, "failed to update data") out = runSample(t, writeWithTransactionUsingDML, dbName, "failed to write with transaction using DML") assertContains(t, out, "Moved 200000 from Album2's MarketingBudget to Album1") out = runSample(t, queryNewColumn, dbName, "failed to query new column") assertContains(t, out, "1 1 300000") assertContains(t, out, "2 2 300000") runSample(t, delete, dbName, "failed to delete data") runSample(t, write, dbName, "failed to insert data") runSample(t, update, dbName, "failed to update data") out = runSample(t, writeWithTransaction, dbName, "failed to write with transaction") assertContains(t, out, "Moved 200000 from Album2's MarketingBudget to Album1") out = runSample(t, queryNewColumn, dbName, "failed to query new column") assertContains(t, out, "1 1 300000") assertContains(t, out, "2 2 300000") runSample(t, delete, dbName, "failed to delete data") runSample(t, write, dbName, "failed to insert data") writeTime := time.Now() out = runSample(t, read, dbName, "failed to read data") assertContains(t, out, "1 1 Total Junk") out = runSample(t, query, dbName, "failed to query data") assertContains(t, out, "1 1 Total Junk") runSample(t, addIndex, dbName, "failed to add index") out = runSample(t, queryUsingIndex, dbName, "failed to query using index") assertContains(t, out, "Go, Go, Go") assertContains(t, out, "Forever Hold Your Peace") if strings.Contains(out, "Green") { t.Errorf("got output %q; should not contain Green", out) } out = runSample(t, readUsingIndex, dbName, "failed to read using index") assertContains(t, out, "Go, Go, Go") assertContains(t, out, 
"Forever Hold Your Peace") assertContains(t, out, "Green") runSample(t, delete, dbName, "failed to delete data") runSample(t, write, dbName, "failed to insert data") runSample(t, update, dbName, "failed to update data") runSample(t, addStoringIndex, dbName, "failed to add storing index") out = runSample(t, readStoringIndex, dbName, "failed to read storing index") assertContains(t, out, "500000") out = runSample(t, readOnlyTransaction, dbName, "failed to read with ReadOnlyTransaction") if strings.Count(out, "Total Junk") != 2 { t.Errorf("got output %q; wanted it to contain 2 occurrences of Total Junk", out) } // Wait at least 15 seconds since the write. time.Sleep(time.Until(writeTime.Add(16 * time.Second))) out = runSample(t, readStaleData, dbName, "failed to read stale data") assertContains(t, out, "Go, Go, Go") assertContains(t, out, "Forever Hold Your Peace") assertContains(t, out, "Green") out = runSample(t, readBatchData, dbName, "failed to read batch data") assertContains(t, out, "1 Marc Richards") runSample(t, addCommitTimestamp, dbName, "failed to add commit timestamp") runSample(t, updateWithTimestamp, dbName, "failed to update with timestamp") out = runSample(t, queryWithTimestamp, dbName, "failed to query with timestamp") assertContains(t, out, "1000000") runSample(t, writeStructData, dbName, "failed to write struct data") out = runSample(t, queryWithStruct, dbName, "failed to query with struct") assertContains(t, out, "6") out = runSample(t, queryWithArrayOfStruct, dbName, "failed to query with array of struct") assertContains(t, out, "6") assertContains(t, out, "7") assertContains(t, out, "8") out = runSample(t, queryWithStructField, dbName, "failed to query with struct field") assertContains(t, out, "6") out = runSample(t, queryWithNestedStructField, dbName, "failed to query with nested struct field") assertContains(t, out, "6 Imagination") assertContains(t, out, "9 Imagination") runSample(t, createTableDocumentsWithTimestamp, dbName, "failed to create documents table with timestamp") runSample(t, writeToDocumentsTable, dbName, "failed to write to documents table") runSample(t, updateDocumentsTable, dbName, "failed to update documents table") out = runSample(t, queryDocumentsTable, dbName, "failed to query documents table") assertContains(t, out, "Hello World 1 Updated") runSample(t, createTableDocumentsWithHistoryTable, dbName, "failed to create documents table with history table") runSample(t, writeWithHistory, dbName, "failed to write with history") runSample(t, updateWithHistory, dbName, "failed to update with history") out = runSample(t, queryWithHistory, dbName, "failed to query with history") assertContains(t, out, "1 1 Hello World 1 Updated") out = runSample(t, insertUsingDML, dbName, "failed to insert using DML") assertContains(t, out, "record(s) inserted") out = runSample(t, setCustomTimeoutAndRetry, dbName, "failed to insert using DML with custom timeout and retry") assertContains(t, out, "record(s) inserted") out = runSample(t, updateUsingDML, dbName, "failed to update using DML") assertContains(t, out, "record(s) updated") out = runSample(t, deleteUsingDML, dbName, "failed to delete using DML") assertContains(t, out, "record(s) deleted") out = runSample(t, updateUsingDMLWithTimestamp, dbName, "failed to update using DML with timestamp") assertContains(t, out, "record(s) updated") out = runSample(t, writeAndReadUsingDML, dbName, "failed to write and read using DML") assertContains(t, out, "Found record name with ") out = runSample(t, updateUsingDMLStruct, dbName, 
"failed to update using DML with struct") assertContains(t, out, "record(s) inserted") out = runSample(t, writeUsingDML, dbName, "failed to write using DML") assertContains(t, out, "record(s) inserted") out = runSample(t, commitStats, dbName, "failed to request commit stats") assertContains(t, out, "3 mutations in transaction") out = runSample(t, queryWithParameter, dbName, "failed to query with parameter") assertContains(t, out, "12 Melissa Garcia") out = runSample(t, updateUsingPartitionedDML, dbName, "failed to update using partitioned DML") assertContains(t, out, "record(s) updated") out = runSample(t, deleteUsingPartitionedDML, dbName, "failed to delete using partitioned DML") assertContains(t, out, "record(s) deleted") out = runSample(t, updateUsingBatchDML, dbName, "failed to update using batch DML") assertContains(t, out, "Executed 2 SQL statements using Batch DML.") out = runSample(t, createTableWithDatatypes, dbName, "failed to create table with data types") assertContains(t, out, "Created Venues table") runSample(t, writeDatatypesData, dbName, "failed to write data with different data types") out = runSample(t, queryWithArray, dbName, "failed to query with array") assertContains(t, out, "19 Venue 19 2020-11-01") assertContains(t, out, "42 Venue 42 2020-10-01") out = runSample(t, queryWithBool, dbName, "failed to query with bool") assertContains(t, out, "19 Venue 19 true") out = runSample(t, queryWithBytes, dbName, "failed to query with bytes") assertContains(t, out, "4 Venue 4") out = runSample(t, queryWithDate, dbName, "failed to query with date") assertContains(t, out, "4 Venue 4 2018-09-02") assertContains(t, out, "42 Venue 42 2018-10-01") out = runSample(t, queryWithFloat, dbName, "failed to query with float") assertContains(t, out, "4 Venue 4 0.8") assertContains(t, out, "19 Venue 19 0.9") out = runSample(t, queryWithInt, dbName, "failed to query with int") assertContains(t, out, "19 Venue 19 6300") assertContains(t, out, "42 Venue 42 3000") out = runSample(t, queryWithString, dbName, "failed to query with string") assertContains(t, out, "42 Venue 42") // Wait 5 seconds to avoid a time drift issue for the next query: // https://github.com/GoogleCloudPlatform/golang-samples/issues/1146. 
time.Sleep(time.Second * 5) out = runSample(t, queryWithTimestampParameter, dbName, "failed to query with timestamp parameter") assertContains(t, out, "4 Venue 4") assertContains(t, out, "19 Venue 19") assertContains(t, out, "42 Venue 42") out = runSample(t, queryWithQueryOptions, dbName, "failed to query with query options") assertContains(t, out, "4 Venue 4") assertContains(t, out, "19 Venue 19") assertContains(t, out, "42 Venue 42") out = runSample(t, createClientWithQueryOptions, dbName, "failed to create a client with query options") assertContains(t, out, "4 Venue 4") assertContains(t, out, "19 Venue 19") assertContains(t, out, "42 Venue 42") runSample(t, dropColumn, dbName, "failed to drop column") runSample(t, addNumericColumn, dbName, "failed to add numeric column") runSample(t, updateDataWithNumericColumn, dbName, "failed to update data with numeric") out = runSample(t, queryWithNumericParameter, dbName, "failed to query with numeric parameter") assertContains(t, out, "4 ") assertContains(t, out, "35000") } func TestBackupSample(t *testing.T) { _ = testutil.EndToEndTest(t) id := randomID() dbName, cleanup := initTest(t, id) defer cleanup() restoreDBName, backupID, cancelledBackupID, cleanupBackup := initBackupTest(t, id, dbName) var out string // Set up the database for testing backup operations. mustRunSample(t, createDatabase, dbName, "failed to create a database") runSample(t, write, dbName, "failed to insert data") // Start testing backup operations. out = runBackupSample(t, createBackup, dbName, backupID, "failed to create a backup") assertContains(t, out, fmt.Sprintf("backups/%s", backupID)) out = runBackupSample(t, cancelBackup, dbName, cancelledBackupID, "failed to cancel a backup") assertContains(t, out, "Backup cancelled.") out = runBackupSample(t, listBackups, dbName, backupID, "failed to list backups") assertContains(t, out, fmt.Sprintf("/backups/%s", backupID)) assertContains(t, out, "Backups listed.") out = runSample(t, listBackupOperations, dbName, "failed to list backup operations") assertContains(t, out, fmt.Sprintf("on database %s", dbName)) out = runBackupSample(t, updateBackup, dbName, backupID, "failed to update a backup") assertContains(t, out, fmt.Sprintf("Updated backup %s", backupID)) out = runBackupSampleWithRetry(t, restoreBackup, restoreDBName, backupID, "failed to restore a backup", 10) assertContains(t, out, fmt.Sprintf("Source database %s restored from backup", dbName)) // This sample should run after a restore operation. out = runSample(t, listDatabaseOperations, restoreDBName, "failed to list database operations") assertContains(t, out, fmt.Sprintf("Database %s restored from backup", restoreDBName)) // Delete the restore DB. 
cleanupBackup() out = runBackupSample(t, deleteBackup, dbName, backupID, "failed to delete a backup") assertContains(t, out, fmt.Sprintf("Deleted backup %s", backupID)) } func TestCreateDatabaseWithRetentionPeriodSample(t *testing.T) { _ = testutil.SystemTest(t) dbName, cleanup := initTest(t, randomID()) defer cleanup() wantRetentionPeriod := "7d" out := runSample(t, createDatabaseWithRetentionPeriod, dbName, "failed to create a database with a retention period") assertContains(t, out, fmt.Sprintf("Created database [%s] with version retention period %q", dbName, wantRetentionPeriod)) } func runSample(t *testing.T, f sampleFunc, dbName, errMsg string) string { var b bytes.Buffer if err := f(&b, dbName); err != nil { t.Errorf("%s: %v", errMsg, err) } return b.String() } func runBackupSample(t *testing.T, f backupSampleFunc, dbName, backupID, errMsg string) string { var b bytes.Buffer if err := f(&b, dbName, backupID); err != nil { t.Errorf("%s: %v", errMsg, err) } return b.String() } func runBackupSampleWithRetry(t *testing.T, f backupSampleFunc, dbName, backupID, errMsg string, maxAttempts int) string { var b bytes.Buffer testutil.Retry(t, maxAttempts, time.Minute, func(r *testutil.R) { b.Reset() if err := f(&b, dbName, backupID); err != nil { if spanner.ErrCode(err) == codes.InvalidArgument && strings.Contains(err.Error(), "Please retry the operation once the pending restores complete") { r.Errorf("%s: %v", errMsg, err) } else { t.Fatalf("%s: %v", errMsg, err) } } }) return b.String() } func runInstanceSample(t *testing.T, f instanceSampleFunc, projectID, instanceID, errMsg string) string { var b bytes.Buffer if err := f(&b, projectID, instanceID); err != nil { t.Errorf("%s: %v", errMsg, err) } return b.String() } func mustRunSample(t *testing.T, f sampleFunc, dbName, errMsg string) string { var b bytes.Buffer if err := f(&b, dbName); err != nil { t.Fatalf("%s: %v", errMsg, err) } return b.String() } func getInstance(t *testing.T) string { instance := os.Getenv("GOLANG_SAMPLES_SPANNER") if instance == "" { t.Skip("Skipping spanner integration test. 
Set GOLANG_SAMPLES_SPANNER.") } if !strings.HasPrefix(instance, "projects/") { t.Fatal("Spanner instance ref must be in the form of 'projects/PROJECT_ID/instances/INSTANCE_ID'") } return instance } func assertContains(t *testing.T, out string, sub string) { t.Helper() if !strings.Contains(out, sub) { t.Errorf("got output %q; want it to contain %q", out, sub) } } // Maximum length of database name is 30 characters, so trim if the generated name is too long func validLength(databaseName string, t *testing.T) (trimmedName string) { if len(databaseName) > 30 { trimmedName := databaseName[:30] t.Logf("Name too long, '%s' trimmed to '%s'", databaseName, trimmedName) return trimmedName } return databaseName } func cleanupInstance(projectID, instanceID string) error { ctx := context.Background() instanceAdmin, err := instance.NewInstanceAdminClient(ctx) if err != nil { return fmt.Errorf("cannot create instance databaseAdmin client: %v", err) } defer instanceAdmin.Close() instanceName := fmt.Sprintf("projects/%s/instances/%s", projectID, instanceID) if err := instanceAdmin.DeleteInstance(ctx, &instancepb.DeleteInstanceRequest{Name: instanceName}); err != nil { return fmt.Errorf("failed to delete instance %s (error %v), might need a manual removal", instanceName, err) } return nil } func randomID() string { now := time.Now().UTC() return fmt.Sprintf("%s-%s", strconv.FormatInt(now.Unix(), 10), uuid.New().String()[:8]) } func parseInstanceName(instanceName string) (project, instance string, err error) { matches := validInstancePattern.FindStringSubmatch(instanceName) if len(matches) == 0 { return "", "", fmt.Errorf("failed to parse database name from %q according to pattern %q", instanceName, validInstancePattern.String()) } return matches[1], matches[2], nil }
[ "\"GOLANG_SAMPLES_SPANNER\"" ]
[]
[ "GOLANG_SAMPLES_SPANNER" ]
[]
["GOLANG_SAMPLES_SPANNER"]
go
1
0
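The Spanner test helpers above call parseInstanceName against a package-level validInstancePattern regexp that is defined outside this excerpt. A minimal, self-contained sketch of what that parsing step does, assuming an instance reference of the form projects/PROJECT_ID/instances/INSTANCE_ID (the concrete regexp below is an assumption for illustration, not the sample's actual definition):

package main

import (
	"fmt"
	"regexp"
)

// Hypothetical stand-in for the sample's validInstancePattern: it accepts the
// "projects/PROJECT_ID/instances/INSTANCE_ID" form that getInstance requires.
var validInstancePattern = regexp.MustCompile(`^projects/(?P<project>[^/]+)/instances/(?P<instance>[^/]+)$`)

func parseInstanceName(instanceName string) (project, instance string, err error) {
	matches := validInstancePattern.FindStringSubmatch(instanceName)
	if len(matches) == 0 {
		return "", "", fmt.Errorf("failed to parse instance name from %q", instanceName)
	}
	return matches[1], matches[2], nil
}

func main() {
	project, instance, err := parseInstanceName("projects/my-project/instances/my-instance")
	if err != nil {
		panic(err)
	}
	fmt.Println(project, instance) // my-project my-instance
}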
newrelic-murmur.go
package main import ( "flag" "log" "os" "github.com/yvasiyarov/newrelic_platform_go" ) const ( AGENT_NAME = "Murmur" AGENT_GUID = "com.github.mikoim.newrelic.murmur" AGENT_VERSION = "0.0.3" ) func main() { var ( host = flag.String("host", "localhost", "Murmur host") port = flag.Int("port", 64738, "Murmur port") licenseEnv = os.Getenv("NEW_RELIC_LICENSE_KEY") license = flag.String("license", "", "New Relic license key") interval = flag.Int("interval", 60, "Poll interval (seconds)") timeout = flag.Int("timeout", 1000, "Timeout (milliseconds)") verbose = flag.Bool("verbose", false, "Verbose") ) flag.Parse() if licenseEnv == "" && *license == "" { log.Fatal("New Relic license key is required.") } if *license == "" { license = &licenseEnv } plugin := newrelic_platform_go.NewNewrelicPlugin(AGENT_VERSION, *license, *interval) component := newrelic_platform_go.NewPluginComponent(AGENT_NAME, AGENT_GUID, *verbose) plugin.AddComponent(component) client := NewMumbleClient(*host, *port, *timeout, *interval) component.AddMetrica(NewMetricaConnectedUsers(client)) component.AddMetrica(NewMetricaMaximumBitrate(client)) component.AddMetrica(NewMetricaMaximumUsers(client)) component.AddMetrica(NewMetricaTotalBandwidth(client)) plugin.Verbose = *verbose plugin.Run() }
[ "\"NEW_RELIC_LICENSE_KEY\"" ]
[]
[ "NEW_RELIC_LICENSE_KEY" ]
[]
["NEW_RELIC_LICENSE_KEY"]
go
1
0
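The Murmur agent above resolves its license key from either the -license flag or the NEW_RELIC_LICENSE_KEY environment variable, with the flag taking precedence. A sketch of just that fallback logic in isolation from the plugin setup (the final log line is illustrative only):

package main

import (
	"flag"
	"log"
	"os"
)

func main() {
	// Prefer the -license flag, fall back to NEW_RELIC_LICENSE_KEY,
	// and fail fast when neither is provided.
	licenseEnv := os.Getenv("NEW_RELIC_LICENSE_KEY")
	license := flag.String("license", "", "New Relic license key")
	flag.Parse()

	if *license == "" {
		license = &licenseEnv
	}
	if *license == "" {
		log.Fatal("New Relic license key is required.")
	}
	log.Printf("using license key of length %d", len(*license))
}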
qmessentials-observation-service/main.go
package main import ( "bytes" "context" "encoding/json" "errors" "io/ioutil" "net/http" "os" "time" "github.com/cleareyeconsulting/qmessentials/observations/models" "github.com/go-chi/chi" "github.com/joho/godotenv" "github.com/rs/zerolog" "github.com/rs/zerolog/log" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo/options" ) func init() { godotenv.Load() } func main() { zerolog.TimeFieldFormat = zerolog.TimeFormatUnix zerolog.SetGlobalLevel(zerolog.DebugLevel) log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr}) log.Debug().Msg("Started") r := chi.NewRouter() r.Post("/observations", handlePostObservation) r.Post("/observation-groups", handlePostObservationGroup) r.Get("/observations", handleGetObservations) port, ok := os.LookupEnv("PORT") if !ok { port = "5000" } http.ListenAndServe(":"+port, r) } //observations POST store to database then post to broker func handlePostObservation(w http.ResponseWriter, r *http.Request) { bodyBytes, err := ioutil.ReadAll(r.Body) if err != nil { log.Error().Err(err).Msg("") w.WriteHeader(http.StatusInternalServerError) return } var observation models.Observation err = json.Unmarshal(bodyBytes, &observation) if err != nil { log.Error().Err(err).Msg("") w.WriteHeader(http.StatusInternalServerError) return } err = addObservation(&observation) if err != nil { log.Error().Err(err).Msg("") w.WriteHeader(http.StatusInternalServerError) return } err = postObservationToBroker(&observation) if err != nil { log.Error().Err(err).Msg("") w.WriteHeader(http.StatusInternalServerError) return } w.WriteHeader(http.StatusOK) } func handlePostObservationGroup(w http.ResponseWriter, r *http.Request) { bodyBytes, err := ioutil.ReadAll(r.Body) if err != nil { log.Error().Err(err).Msg("") w.WriteHeader(http.StatusInternalServerError) return } var observations []models.Observation err = json.Unmarshal(bodyBytes, &observations) if err != nil { log.Error().Err(err).Msg("") w.WriteHeader(http.StatusInternalServerError) return } err = addObservationGroup(&observations) if err != nil { log.Error().Err(err).Msg("") w.WriteHeader(http.StatusInternalServerError) return } err = postObservationGroupToBroker(&observations) if err != nil { log.Error().Err(err).Msg("") w.WriteHeader(http.StatusInternalServerError) return } w.WriteHeader(http.StatusOK) } //observations GET by lot func handleGetObservations(w http.ResponseWriter, r *http.Request) { lotID := chi.URLParam(r, "lotID") if lotID == "" { log.Warn().Msg("GET observations received without lot ID") w.WriteHeader(http.StatusBadRequest) return } ctx, client, _, collection, err := getMongoDB("observations") if err != nil { log.Error().Err(err).Msg("") w.WriteHeader(http.StatusInternalServerError) return } defer client.Disconnect(ctx) csr, err := collection.Find(ctx, bson.D{{Key: "lotId", Value: lotID}}) if err != nil { log.Error().Err(err).Msg("") w.WriteHeader(http.StatusInternalServerError) return } var observations []models.Observation csr.All(ctx, &observations) observationsJSON, err := json.Marshal(observations) if err != nil { log.Error().Err(err).Msg("") w.WriteHeader(http.StatusInternalServerError) return } w.Write(observationsJSON) w.WriteHeader(http.StatusOK) } //doing this directly in main rather than split out a repository, etc., because this is a complex //database operation that need to happen all together and quickly, so no room for abstractions func addObservation(observation *models.Observation) error { ctx, client, _, collection, err := 
getMongoDB("observations") if err != nil { return err } defer client.Disconnect(ctx) if os.Getenv("MONGODB_HAS_REPLICA_SET") == "true" { //Running in a transaction requires a replica set. If this is a relatively low-volume installation, //a transaction shouldn't be necessary. But if multiple simulataneous requests are expected, you need //the transaction to make sure the sequence numbers are correct. session, err := client.StartSession() if err != nil { return err } defer session.EndSession(ctx) _, err = session.WithTransaction(ctx, func(sessCtx mongo.SessionContext) (interface{}, error) { maxItemSequenceNumber, err := getMaxItemSequenceNumber(sessCtx, collection, observation.ItemID) if err != nil { return nil, err } maxLotSequenceNumber, err := getMaxLotSequenceNumber(sessCtx, collection, observation.LotID) if err != nil { return nil, err } observation.ItemSequenceNumber = *maxItemSequenceNumber + 1 observation.LotSequenceNumber = *maxLotSequenceNumber + 1 _, err = collection.InsertOne(sessCtx, &observation) if err != nil { return nil, err } return nil, nil }) if err != nil { return err } return nil } else { maxItemSequenceNumber, err := getMaxItemSequenceNumber(ctx, collection, observation.ItemID) if err != nil { return err } observation.ItemSequenceNumber = *maxItemSequenceNumber + 1 maxLotSequenceNumber, err := getMaxLotSequenceNumber(ctx, collection, observation.LotID) if err != nil { return err } observation.LotSequenceNumber = *maxLotSequenceNumber + 1 _, err = collection.InsertOne(ctx, &observation) if err != nil { return err } return nil } } func getMongoDB(collectionName string) (context.Context, *mongo.Client, *mongo.Database, *mongo.Collection, error) { client, err := mongo.NewClient(options.Client().ApplyURI(os.Getenv("MONGODB_CONNECTION_STRING"))) if err != nil { return nil, nil, nil, nil, err } ctx := context.Background() err = client.Connect(ctx) if err != nil { return nil, nil, nil, nil, err } database := client.Database(os.Getenv("MONGODB_DATABASE_NAME")) collection := database.Collection(collectionName) return ctx, client, database, collection, nil } func addObservationGroup(observations *[]models.Observation) error { var itemID string var lotID string for i, observation := range *observations { if i == 0 { itemID = observation.ItemID lotID = observation.LotID } else { if observation.ItemID != itemID || observation.LotID != lotID { return errors.New("Observations in observation group must all be from the same item") } } } client, err := mongo.NewClient(options.Client().ApplyURI(os.Getenv("MONGODB_CONNECTION_STRING"))) if err != nil { return err } ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() //Not sure if this is right err = client.Connect(ctx) if err != nil { return err } defer client.Disconnect(ctx) database := client.Database("qmessentialsObservations") collection := database.Collection("observations") var newItemSequenceNumber int var newLotSequenceNumber int //TODO: Refactor this to remove duplication if os.Getenv("MONGODB_HAS_REPLICA_SET") == "true" { //Running in a transaction requires a replica set. If this is a relatively low-volume installation, //a transaction shouldn't be necessary. But if multiple simulataneous requests are expected, you need //the transaction to make sure the sequence numbers are correct. 
session, err := client.StartSession() if err != nil { return err } defer session.EndSession(ctx) _, err = session.WithTransaction(ctx, func(sessCtx mongo.SessionContext) (interface{}, error) { maxItemSequenceNumber, err := getMaxItemSequenceNumber(sessCtx, collection, itemID) if err != nil { return nil, err } maxLotSequenceNumber, err := getMaxLotSequenceNumber(sessCtx, collection, lotID) if err != nil { return nil, err } newItemSequenceNumber = *maxItemSequenceNumber + 1 newLotSequenceNumber = *maxLotSequenceNumber + 1 for _, observation := range *observations { observation.ItemSequenceNumber = newItemSequenceNumber newItemSequenceNumber += 1 observation.LotSequenceNumber = newLotSequenceNumber newLotSequenceNumber += 1 } //Go doesn't automatically cast []T to []interface{}, you have to do it explicitly recordsToInsert := make([]interface{}, len(*observations)) for i, observation := range *observations { recordsToInsert[i] = observation } _, err = collection.InsertMany(sessCtx, recordsToInsert) if err != nil { return nil, err } return nil, nil }) if err != nil { return err } return nil } else { maxItemSequenceNumber, err := getMaxItemSequenceNumber(ctx, collection, itemID) if err != nil { return err } maxLotSequenceNumber, err := getMaxLotSequenceNumber(ctx, collection, lotID) if err != nil { return err } newItemSequenceNumber = *maxItemSequenceNumber + 1 newLotSequenceNumber = *maxLotSequenceNumber + 1 for _, observation := range *observations { observation.ItemSequenceNumber = newItemSequenceNumber newItemSequenceNumber += 1 observation.LotSequenceNumber = newLotSequenceNumber newLotSequenceNumber += 1 } recordsToInsert := make([]interface{}, len(*observations)) for i, observation := range *observations { recordsToInsert[i] = observation } _, err = collection.InsertMany(ctx, recordsToInsert) if err != nil { return err } return nil } } func getMaxItemSequenceNumber(ctx context.Context, collection *mongo.Collection, itemID string) (*int, error) { findOptions := options.Find() findOptions.SetSort(bson.D{{Key: "itemSequenceNumber", Value: -1}}) findOptions.SetLimit(1) csr, err := collection.Find(ctx, bson.D{{Key: "itemId", Value: itemID}}, findOptions) if err != nil { return nil, err } if !csr.Next(ctx) { rv := 0 return &rv, nil } var obs models.Observation err = csr.Decode(&obs) if err != nil { return nil, err } err = csr.Close(ctx) if err != nil { return nil, err } return &obs.ItemSequenceNumber, err } func getMaxLotSequenceNumber(ctx context.Context, collection *mongo.Collection, lotID string) (*int, error) { findOptions := options.Find() findOptions.SetSort(bson.D{{Key: "lotSequenceNumber", Value: -1}}) findOptions.SetLimit(1) csr, err := collection.Find(ctx, bson.D{{Key: "lotId", Value: lotID}}, findOptions) if err != nil { return nil, err } if !csr.Next(ctx) { rv := 0 return &rv, nil } var obs models.Observation err = csr.Decode(&obs) if err != nil { return nil, err } err = csr.Close(ctx) if err != nil { return nil, err } return &obs.LotSequenceNumber, err } func postObservationToBroker(observation *models.Observation) error { observationBytes, err := json.Marshal(observation) if err != nil { return err } _, err = http.Post(os.Getenv("CALCULATION_BROKER")+"/observations", "application/json", bytes.NewBuffer(observationBytes)) return err } func postObservationGroupToBroker(observations *[]models.Observation) error { observationBytes, err := json.Marshal(observations) if err != nil { return err } _, err = http.Post(os.Getenv("CALCULATION_BROKER")+"/observation-groups", 
"application/json", bytes.NewBuffer(observationBytes)) return err }
[ "\"MONGODB_HAS_REPLICA_SET\"", "\"MONGODB_CONNECTION_STRING\"", "\"MONGODB_DATABASE_NAME\"", "\"MONGODB_CONNECTION_STRING\"", "\"MONGODB_HAS_REPLICA_SET\"", "\"CALCULATION_BROKER\"", "\"CALCULATION_BROKER\"" ]
[]
[ "MONGODB_HAS_REPLICA_SET", "MONGODB_DATABASE_NAME", "MONGODB_CONNECTION_STRING", "CALCULATION_BROKER" ]
[]
["MONGODB_HAS_REPLICA_SET", "MONGODB_DATABASE_NAME", "MONGODB_CONNECTION_STRING", "CALCULATION_BROKER"]
go
4
0
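The observation service's addObservation/addObservationGroup functions branch on MONGODB_HAS_REPLICA_SET because multi-document transactions require a replica set; the transaction is what keeps the computed sequence numbers consistent under concurrent requests. A stripped-down sketch of that branch, assuming the same driver version and connection environment variables as the service (the collection name and document here are illustrative):

package main

import (
	"context"
	"log"
	"os"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

// insertWithOptionalTransaction mirrors the pattern used by the service above:
// when MONGODB_HAS_REPLICA_SET is "true", the insert runs inside a session
// transaction; otherwise it runs directly against the collection.
func insertWithOptionalTransaction(ctx context.Context, client *mongo.Client, coll *mongo.Collection, doc bson.D) error {
	if os.Getenv("MONGODB_HAS_REPLICA_SET") != "true" {
		_, err := coll.InsertOne(ctx, doc)
		return err
	}
	session, err := client.StartSession()
	if err != nil {
		return err
	}
	defer session.EndSession(ctx)
	_, err = session.WithTransaction(ctx, func(sessCtx mongo.SessionContext) (interface{}, error) {
		return coll.InsertOne(sessCtx, doc)
	})
	return err
}

func main() {
	ctx := context.Background()
	client, err := mongo.NewClient(options.Client().ApplyURI(os.Getenv("MONGODB_CONNECTION_STRING")))
	if err != nil {
		log.Fatal(err)
	}
	if err := client.Connect(ctx); err != nil {
		log.Fatal(err)
	}
	defer client.Disconnect(ctx)
	coll := client.Database(os.Getenv("MONGODB_DATABASE_NAME")).Collection("observations")
	if err := insertWithOptionalTransaction(ctx, client, coll, bson.D{{Key: "lotId", Value: "demo"}}); err != nil {
		log.Fatal(err)
	}
}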
devops/devops/wsgi.py
""" WSGI config for devops project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "devops.settings") application = get_wsgi_application()
[]
[]
[]
[]
[]
python
0
0
startup/GafferScene/usd.py
########################################################################## # # Copyright (c) 2018, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided with # the distribution. # # * Neither the name of John Haddon nor the names of # any other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## import os import sys import ctypes import IECore moduleSearchPath = IECore.SearchPath( os.environ["PYTHONPATH"] ) if moduleSearchPath.find( "IECoreUSD" ) : # Import the USD Python module _without_ RTLD_GLOBAL, otherwise # we get errors like the following spewed to the shell when we first # open a USD file : # # ``` # Coding Error: in DefinePythonClass at line 932 of /disk1/john/dev/gafferDependencies/USD/working/USD-18.09/pxr/base/lib/tf/type.cpp # -- TfType 'TfNotice' already has a defined Python type; cannot redefine # ``` # # > Note : RTLD_GLOBAL is turned on in the first place by IECore/__init__.py. # > Ideally we'd stop doing that and wouldn't need this workaround. See # > https://github.com/ImageEngine/cortex/pull/810. try : originalDLOpenFlags = sys.getdlopenflags() sys.setdlopenflags( originalDLOpenFlags & ~ctypes.RTLD_GLOBAL ) from pxr import Usd finally : sys.setdlopenflags( originalDLOpenFlags ) # Import IECoreUSD so that we get the USD SceneInterface registered, # providing USD functionality to both the SceneReader and SceneWriter. import IECoreUSD
[]
[]
[ "PYTHONPATH" ]
[]
["PYTHONPATH"]
python
1
0
gen-meta.py
import os from clang.cindex import * # # UTILS # def has_child(node, kind, spel = ''): for child in node.get_children(): if child.kind == kind and child.spelling == spel: return True return False def has_child_any(node, kind, spels): for spel in spels: if has_child(node, kind, spel): return True return False def has_base_spec(node, spec): for child in node.get_children(): if child.kind == CursorKind.CXX_BASE_SPECIFIER: if has_child(child, CursorKind.TYPE_REF, spec): return True return False def join_namespace(a, b): if len(a) == 0: return b return a + '::' + b def enum_namespace(node, name, full): if node.kind != CursorKind.NAMESPACE: return [] if node.spelling == name: return [[node, full]] x = [] for child in node.get_children(): x += enum_namespace( child, name, join_namespace(full, node.spelling)) return x def get_namespaces(node, name, full = ''): x = [] if node.kind == CursorKind.NAMESPACE: x += enum_namespace(node, name, full) else: for child in node.get_children(): x += get_namespaces(child, name) return x def is_punct(spel): puncts = [ '(', ')', '[', ']', '{', '}', '<', '>', ';', ',', '::', ':' ] return spel in puncts def need_space(a, b): if a == ':': return True return (not is_punct(a)) and (not is_punct(b)) def get_endl(node): if node.kind == CursorKind.NAMESPACE: return '' return ';' def tokens_of(node): decl = '' prev = '' for token in node.get_tokens(): curr = token.spelling if len(decl) > 0 and need_space(prev, curr): decl += ' ' decl += curr prev = curr return decl + get_endl(node) def print_tree(node, s = ''): print(s + str(node.kind)) if len(node.spelling) > 0: print(s + node.spelling) for child in node.get_children(): print_tree(child, s + ' ') # # CODEGEN # tag_entity = 'struct laplace::__gen::entity' tag_real = 'struct laplace::__gen::real' tag_points = 'struct laplace::__gen::points' def print_access(node, s = ''): if node.kind == CursorKind.STRUCT_DECL: print(s + 'public:') elif node.kind == CursorKind.CLASS_DECL: print(s + 'private:') def has_meta_tag(node): return has_child_any( node, CursorKind.TYPE_REF, [tag_entity, tag_real, tag_points]) def gen_meta_tag(node, s = ''): if has_child(node, CursorKind.TYPE_REF, tag_real): print(s + 'private:') print(s + ' sl::index n_' + node.spelling + ' = {};\n') print(s + 'public:') print(s + ' static void set_' + node.spelling + '(entity en, intval value) noexcept;') print(s + ' [[nodiscard]] static auto get_' + node.spelling + '(entity en) noexcept -> intval;') print(s + ' [[nodiscard]] static auto scale_of_' + node.spelling + '(entity en) noexcept -> intval;\n') def print_helpers(s = ''): print(s + 'public:') print(s + ' using entity = laplace::engine::access::entity const &;\n') def gen_meta_tags(node, s = ''): for child in node.get_children(): if has_meta_tag(child): gen_meta_tag(child, s) def print_without_meta_tags(node, s = ''): for child in node.get_children(): if not has_meta_tag(child): print(s + tokens_of(child)) def gen_entity(node, s = ''): print(s + '/* Generated entity.') print(s + ' */') print(s + 'class ' + node.spelling + ' : public laplace::engine::basic_entity {') print_helpers(s); gen_meta_tags(node, s) print_access(node, s); print_without_meta_tags(node, s + ' '); print(s + '};') def print_without_using(node, tag, s = ''): if has_child(node, CursorKind.NAMESPACE_REF, tag): print(s + '/* ' + tokens_of(node) + ' */\n') else: print(s + tokens_of(node) + '\n') def modify_node(node, s = ''): if has_base_spec(node, tag_entity): gen_entity(node, s) else: print_without_using(node, '__gen', s) def 
modify_namespace(node, name, s = ''): print(s + '/**') print(s + ' * AST') print(s + ' *') print_tree(node, s) print(s + ' */\n') print(s + 'namespace ' + name + ' {') print(s + ' using laplace::engine::intval;\n') for child in node.get_children(): modify_node(child, s + ' ') print(s + '}') def process(node): namespaces = get_namespaces(node, '__meta') for x in namespaces: modify_namespace(x[0], x[1]) def main(): if 'CLANG_LIBRARY_PATH' in os.environ: Config.set_library_path(os.environ['CLANG_LIBRARY_PATH']) index = None try: index = Index.create() except: print( 'Set CLANG_LIBRARY_PATH environment variable to your <LLVM>/bin folder.') return u = index.parse('__source.cpp', unsaved_files=[ ('__source.cpp', '#include "unit.in.h"\n') ]) process(u.cursor) if __name__ == '__main__': main()
[]
[]
[ "CLANG_LIBRARY_PATH" ]
[]
["CLANG_LIBRARY_PATH"]
python
1
0
duple-api/duple/__init__.py
import logging import sys import os logging.basicConfig( level=logging.getLevelName(os.getenv("LOG_LEVEL") or "INFO"), format="[%(asctime)s] [%(levelname)s] %(message)s", handlers=[logging.StreamHandler(sys.stdout)], ) logger = logging.getLogger()
[]
[]
[ "LOG_LEVEL" ]
[]
["LOG_LEVEL"]
python
1
0
confluence/examples/contentProperties/gets/gets.go
package main import ( "context" "github.com/ctreminiom/go-atlassian/confluence" "log" "net/http" "os" ) func main() { var ( host = os.Getenv("HOST") mail = os.Getenv("MAIL") token = os.Getenv("TOKEN") ) instance, err := confluence.New(nil, host) if err != nil { log.Fatal(err) } instance.Auth.SetBasicAuth(mail, token) instance.Auth.SetUserAgent("curl/7.54.0") var ( contentID = "80412692" expand = []string{"version"} startAt = 0 maxResults = 50 ) properties, response, err := instance.Content.Property.Gets(context.Background(), contentID, expand, startAt, maxResults) if err != nil { if response != nil { if response.Code == http.StatusBadRequest { log.Println(response.API) } } log.Fatal(err) } log.Println("Endpoint:", response.Endpoint) log.Println("Status Code:", response.Code) for _, property := range properties.Results { log.Println(property) } }
[ "\"HOST\"", "\"MAIL\"", "\"TOKEN\"" ]
[]
[ "MAIL", "HOST", "TOKEN" ]
[]
["MAIL", "HOST", "TOKEN"]
go
3
0
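The Confluence example above reads HOST, MAIL and TOKEN straight from the environment and only fails later, when the client call is made, if they are missing. A small guard one might place before constructing the client (a sketch added here for illustration; it is not part of the original sample):

package main

import (
	"fmt"
	"log"
	"os"
)

// requireEnv returns the value of the named variable or exits, so the client
// is never constructed with empty credentials.
func requireEnv(name string) string {
	value := os.Getenv(name)
	if value == "" {
		log.Fatalf("environment variable %s must be set", name)
	}
	return value
}

func main() {
	host := requireEnv("HOST")
	mail := requireEnv("MAIL")
	token := requireEnv("TOKEN")
	// Pass these to confluence.New and SetBasicAuth as in the sample above.
	fmt.Println(host, mail, len(token))
}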
tests/projects/test_projects.py
import os import git import shutil import tempfile import yaml from distutils import dir_util import mock import pytest import mlflow from mlflow.entities import RunStatus, ViewType, Experiment, SourceType from mlflow.exceptions import ExecutionException, MlflowException from mlflow.store.file_store import FileStore from mlflow.utils import env from mlflow.utils.mlflow_tags import MLFLOW_PARENT_RUN_ID, MLFLOW_USER, MLFLOW_SOURCE_NAME, \ MLFLOW_SOURCE_TYPE, MLFLOW_GIT_BRANCH, MLFLOW_GIT_REPO_URL, LEGACY_MLFLOW_GIT_BRANCH_NAME, \ LEGACY_MLFLOW_GIT_REPO_URL, MLFLOW_PROJECT_ENTRY_POINT from tests.projects.utils import TEST_PROJECT_DIR, TEST_PROJECT_NAME, GIT_PROJECT_URI, \ validate_exit_status, assert_dirs_equal from tests.projects.utils import tracking_uri_mock # pylint: disable=unused-import MOCK_USER = "janebloggs" @pytest.fixture def patch_user(): with mock.patch("mlflow.projects._get_user", return_value=MOCK_USER): yield def _build_uri(base_uri, subdirectory): if subdirectory != "": return "%s#%s" % (base_uri, subdirectory) return base_uri def _get_version_local_git_repo(local_git_repo): repo = git.Repo(local_git_repo, search_parent_directories=True) return repo.git.rev_parse("HEAD") @pytest.fixture(scope="module", autouse=True) def clean_mlruns_dir(): yield dir_path = os.path.join(TEST_PROJECT_DIR, "mlruns") if os.path.exists(dir_path): shutil.rmtree(dir_path) @pytest.fixture def local_git_repo(tmpdir): local_git = tmpdir.join('git_repo').strpath repo = git.Repo.init(local_git) dir_util.copy_tree(src=TEST_PROJECT_DIR, dst=local_git) dir_util.copy_tree(src=os.path.dirname(TEST_PROJECT_DIR), dst=local_git) repo.git.add(A=True) repo.index.commit("test") yield os.path.abspath(local_git) @pytest.fixture def local_git_repo_uri(local_git_repo): return "file://%s" % local_git_repo @pytest.fixture def zipped_repo(tmpdir): import zipfile zip_name = tmpdir.join('%s.zip' % TEST_PROJECT_NAME).strpath with zipfile.ZipFile(zip_name, 'w', zipfile.ZIP_DEFLATED) as zip_file: for root, _, files in os.walk(TEST_PROJECT_DIR): for file_name in files: file_path = os.path.join(root, file_name) zip_file.write(file_path, file_path[len(TEST_PROJECT_DIR) + len(os.sep):]) return zip_name def test_is_zip_uri(): assert mlflow.projects._is_zip_uri('http://foo.bar/moo.zip') assert mlflow.projects._is_zip_uri('https://foo.bar/moo.zip') assert mlflow.projects._is_zip_uri('file:///moo.zip') assert mlflow.projects._is_zip_uri('file://C:/moo.zip') assert mlflow.projects._is_zip_uri('/moo.zip') assert mlflow.projects._is_zip_uri('C:/moo.zip') assert not mlflow.projects._is_zip_uri('http://foo.bar/moo') assert not mlflow.projects._is_zip_uri('https://foo.bar/moo') assert not mlflow.projects._is_zip_uri('file:///moo') assert not mlflow.projects._is_zip_uri('file://C:/moo') assert not mlflow.projects._is_zip_uri('/moo') assert not mlflow.projects._is_zip_uri('C:/moo') def test_fetch_project(local_git_repo, local_git_repo_uri, zipped_repo, httpserver): httpserver.serve_content(open(zipped_repo, 'rb').read()) # The tests are as follows: # 1. Fetching a locally saved project. # 2. Fetching a project located in a Git repo root directory. # 3. Fetching a project located in a Git repo subdirectory. # 4. Passing a subdirectory works for local directories. # 5. Fetching a remote ZIP file # 6. Using a local ZIP file # 7. 
Using a file:// URL to a local ZIP file test_list = [ (TEST_PROJECT_DIR, '', TEST_PROJECT_DIR), (local_git_repo_uri, '', local_git_repo), (local_git_repo_uri, 'example_project', os.path.join(local_git_repo, 'example_project')), (os.path.dirname(TEST_PROJECT_DIR), os.path.basename(TEST_PROJECT_DIR), TEST_PROJECT_DIR), (httpserver.url + '/%s.zip' % TEST_PROJECT_NAME, '', TEST_PROJECT_DIR), (zipped_repo, '', TEST_PROJECT_DIR), ('file://%s' % zipped_repo, '', TEST_PROJECT_DIR), ] for base_uri, subdirectory, expected in test_list: work_dir = mlflow.projects._fetch_project( uri=_build_uri(base_uri, subdirectory), force_tempdir=False) assert_dirs_equal(expected=expected, actual=work_dir) # Test that we correctly determine the dest directory to use when fetching a project. for force_tempdir, uri in [(True, TEST_PROJECT_DIR), (False, GIT_PROJECT_URI)]: dest_dir = mlflow.projects._fetch_project(uri=uri, force_tempdir=force_tempdir) assert os.path.commonprefix([dest_dir, tempfile.gettempdir()]) == tempfile.gettempdir() assert os.path.exists(dest_dir) for force_tempdir, uri in [(None, TEST_PROJECT_DIR), (False, TEST_PROJECT_DIR)]: assert mlflow.projects._fetch_project(uri=uri, force_tempdir=force_tempdir) == \ os.path.abspath(TEST_PROJECT_DIR) def test_fetch_project_validations(local_git_repo_uri): # Verify that runs fail if given incorrect subdirectories via the `#` character. for base_uri in [TEST_PROJECT_DIR, local_git_repo_uri]: with pytest.raises(ExecutionException): mlflow.projects._fetch_project(uri=_build_uri(base_uri, "fake"), force_tempdir=False) # Passing `version` raises an exception for local projects with pytest.raises(ExecutionException): mlflow.projects._fetch_project(uri=TEST_PROJECT_DIR, force_tempdir=False, version="version") def test_dont_remove_mlruns(tmpdir): # Fetching a directory containing an "mlruns" folder doesn't remove the "mlruns" folder src_dir = tmpdir.mkdir("mlruns-src-dir") src_dir.mkdir("mlruns").join("some-file.txt").write("hi") src_dir.join("MLproject").write("dummy MLproject contents") dst_dir = mlflow.projects._fetch_project(uri=src_dir.strpath, version=None, force_tempdir=False) assert_dirs_equal(expected=src_dir.strpath, actual=dst_dir) def test_parse_subdirectory(): # Make sure the parsing works as intended. test_uri = "uri#subdirectory" parsed_uri, parsed_subdirectory = mlflow.projects._parse_subdirectory(test_uri) assert parsed_uri == "uri" assert parsed_subdirectory == "subdirectory" # Make sure periods are restricted in Git repo subdirectory paths. period_fail_uri = GIT_PROJECT_URI + "#.." 
with pytest.raises(ExecutionException): mlflow.projects._parse_subdirectory(period_fail_uri) def test_invalid_run_mode(tracking_uri_mock): # pylint: disable=unused-argument """ Verify that we raise an exception given an invalid run mode """ with pytest.raises(ExecutionException): mlflow.projects.run(uri=TEST_PROJECT_DIR, backend="some unsupported mode") def test_use_conda(tracking_uri_mock): # pylint: disable=unused-argument """ Verify that we correctly handle the `use_conda` argument.""" # Verify we throw an exception when conda is unavailable old_path = os.environ["PATH"] env.unset_variable("PATH") conda_exe_path = '' if "CONDA_EXE" in os.environ: conda_exe_path = os.environ["CONDA_EXE"] env.unset_variable("CONDA_EXE") try: with pytest.raises(ExecutionException): mlflow.projects.run(TEST_PROJECT_DIR, use_conda=True) finally: os.environ["PATH"] = old_path if conda_exe_path: os.environ["CONDA_EXE"] = conda_exe_path def test_is_valid_branch_name(local_git_repo): assert mlflow.projects._is_valid_branch_name(local_git_repo, "master") assert not mlflow.projects._is_valid_branch_name(local_git_repo, "dev") @pytest.mark.parametrize("use_start_run", map(str, [0, 1])) @pytest.mark.parametrize("version", [None, "master", "git-commit"]) def test_run_local_git_repo(patch_user, # pylint: disable=unused-argument local_git_repo, local_git_repo_uri, tracking_uri_mock, # pylint: disable=unused-argument use_start_run, version): if version is not None: uri = local_git_repo_uri + "#" + TEST_PROJECT_NAME else: uri = os.path.join("%s/" % local_git_repo, TEST_PROJECT_NAME) if version == "git-commit": version = _get_version_local_git_repo(local_git_repo) submitted_run = mlflow.projects.run( uri, entry_point="test_tracking", version=version, parameters={"use_start_run": use_start_run}, use_conda=False, experiment_id=FileStore.DEFAULT_EXPERIMENT_ID) # Blocking runs should be finished when they return validate_exit_status(submitted_run.get_status(), RunStatus.FINISHED) # Test that we can call wait() on a synchronous run & that the run has the correct # status after calling wait(). 
submitted_run.wait() validate_exit_status(submitted_run.get_status(), RunStatus.FINISHED) # Validate run contents in the FileStore run_id = submitted_run.run_id mlflow_service = mlflow.tracking.MlflowClient() run_infos = mlflow_service.list_run_infos( experiment_id=FileStore.DEFAULT_EXPERIMENT_ID, run_view_type=ViewType.ACTIVE_ONLY) assert len(run_infos) == 1 store_run_id = run_infos[0].run_id assert run_id == store_run_id run = mlflow_service.get_run(run_id) assert run.info.status == RunStatus.to_string(RunStatus.FINISHED) assert run.data.params == {"use_start_run": use_start_run} assert run.data.metrics == {"some_key": 3} tags = run.data.tags assert tags[MLFLOW_USER] == MOCK_USER assert "file:" in tags[MLFLOW_SOURCE_NAME] assert tags[MLFLOW_SOURCE_TYPE] == SourceType.to_string(SourceType.PROJECT) assert tags[MLFLOW_PROJECT_ENTRY_POINT] == "test_tracking" if version == "master": assert tags[MLFLOW_GIT_BRANCH] == "master" assert tags[MLFLOW_GIT_REPO_URL] == local_git_repo_uri assert tags[LEGACY_MLFLOW_GIT_BRANCH_NAME] == "master" assert tags[LEGACY_MLFLOW_GIT_REPO_URL] == local_git_repo_uri @pytest.mark.parametrize("experiment_id,experiment_name,expected", [("1", None, "1"), (None, 'name', "33")]) def test_resolve_experiment_id(experiment_id, experiment_name, expected): with mock.patch('mlflow.tracking.MlflowClient.get_experiment_by_name') \ as get_experiment_by_name_mock: get_experiment_by_name_mock.return_value = Experiment(experiment_id="33", name='Name', artifact_location=None, lifecycle_stage=None) exp_id = mlflow.projects._resolve_experiment_id(experiment_name=experiment_name, experiment_id=experiment_id) assert exp_id == expected def test_resolve_experiment_id_should_not_allow_both_name_and_id_in_use(): with pytest.raises(MlflowException, match="Specify only one of 'experiment_name' or 'experiment_id'."): _ = mlflow.projects._resolve_experiment_id(experiment_name='experiment_named', experiment_id="44") def test_invalid_version_local_git_repo(local_git_repo_uri, tracking_uri_mock): # pylint: disable=unused-argument # Run project with invalid commit hash with pytest.raises(ExecutionException, match=r'Unable to checkout version \'badc0de\''): mlflow.projects.run(local_git_repo_uri + "#" + TEST_PROJECT_NAME, entry_point="test_tracking", version="badc0de", use_conda=False, experiment_id=FileStore.DEFAULT_EXPERIMENT_ID) @pytest.mark.parametrize("use_start_run", map(str, [0, 1])) def test_run(tmpdir, # pylint: disable=unused-argument patch_user, # pylint: disable=unused-argument tracking_uri_mock, # pylint: disable=unused-argument use_start_run): submitted_run = mlflow.projects.run( TEST_PROJECT_DIR, entry_point="test_tracking", parameters={"use_start_run": use_start_run}, use_conda=False, experiment_id=FileStore.DEFAULT_EXPERIMENT_ID) assert submitted_run.run_id is not None # Blocking runs should be finished when they return validate_exit_status(submitted_run.get_status(), RunStatus.FINISHED) # Test that we can call wait() on a synchronous run & that the run has the correct # status after calling wait(). 
submitted_run.wait() validate_exit_status(submitted_run.get_status(), RunStatus.FINISHED) # Validate run contents in the FileStore run_id = submitted_run.run_id mlflow_service = mlflow.tracking.MlflowClient() run_infos = mlflow_service.list_run_infos( experiment_id=FileStore.DEFAULT_EXPERIMENT_ID, run_view_type=ViewType.ACTIVE_ONLY) assert len(run_infos) == 1 store_run_id = run_infos[0].run_id assert run_id == store_run_id run = mlflow_service.get_run(run_id) assert run.info.status == RunStatus.to_string(RunStatus.FINISHED) assert run.data.params == {"use_start_run": use_start_run} assert run.data.metrics == {"some_key": 3} tags = run.data.tags assert tags[MLFLOW_USER] == MOCK_USER assert "file:" in tags[MLFLOW_SOURCE_NAME] assert tags[MLFLOW_SOURCE_TYPE] == SourceType.to_string(SourceType.PROJECT) assert tags[MLFLOW_PROJECT_ENTRY_POINT] == "test_tracking" def test_run_with_parent(tmpdir, tracking_uri_mock): # pylint: disable=unused-argument """Verify that if we are in a nested run, mlflow.projects.run() will have a parent_run_id.""" with mlflow.start_run(): parent_run_id = mlflow.active_run().info.run_id submitted_run = mlflow.projects.run( TEST_PROJECT_DIR, entry_point="test_tracking", parameters={"use_start_run": "1"}, use_conda=False, experiment_id=FileStore.DEFAULT_EXPERIMENT_ID) assert submitted_run.run_id is not None validate_exit_status(submitted_run.get_status(), RunStatus.FINISHED) run_id = submitted_run.run_id run = mlflow.tracking.MlflowClient().get_run(run_id) assert run.data.tags[MLFLOW_PARENT_RUN_ID] == parent_run_id def test_run_async(tracking_uri_mock): # pylint: disable=unused-argument submitted_run0 = mlflow.projects.run( TEST_PROJECT_DIR, entry_point="sleep", parameters={"duration": 2}, use_conda=False, experiment_id=FileStore.DEFAULT_EXPERIMENT_ID, synchronous=False) validate_exit_status(submitted_run0.get_status(), RunStatus.RUNNING) submitted_run0.wait() validate_exit_status(submitted_run0.get_status(), RunStatus.FINISHED) submitted_run1 = mlflow.projects.run( TEST_PROJECT_DIR, entry_point="sleep", parameters={"duration": -1, "invalid-param": 30}, use_conda=False, experiment_id=FileStore.DEFAULT_EXPERIMENT_ID, synchronous=False) submitted_run1.wait() validate_exit_status(submitted_run1.get_status(), RunStatus.FAILED) @pytest.mark.parametrize( "mock_env,expected_conda,expected_activate", [ ({"CONDA_EXE": "/abc/conda"}, "/abc/conda", "/abc/activate"), ({mlflow.projects.MLFLOW_CONDA_HOME: "/some/dir/"}, "/some/dir/bin/conda", "/some/dir/bin/activate") ] ) def test_conda_path(mock_env, expected_conda, expected_activate): """Verify that we correctly determine the path to conda executables""" with mock.patch.dict("os.environ", mock_env): assert mlflow.projects._get_conda_bin_executable("conda") == expected_conda assert mlflow.projects._get_conda_bin_executable("activate") == expected_activate def test_cancel_run(tracking_uri_mock): # pylint: disable=unused-argument submitted_run0, submitted_run1 = [mlflow.projects.run( TEST_PROJECT_DIR, entry_point="sleep", parameters={"duration": 2}, use_conda=False, experiment_id=FileStore.DEFAULT_EXPERIMENT_ID, synchronous=False) for _ in range(2)] submitted_run0.cancel() validate_exit_status(submitted_run0.get_status(), RunStatus.FAILED) # Sanity check: cancelling one run has no effect on the other assert submitted_run1.wait() validate_exit_status(submitted_run1.get_status(), RunStatus.FINISHED) # Try cancelling after calling wait() submitted_run1.cancel() validate_exit_status(submitted_run1.get_status(), RunStatus.FINISHED) def 
test_storage_dir(tmpdir): """ Test that we correctly handle the `storage_dir` argument, which specifies where to download distributed artifacts passed to arguments of type `path`. """ assert os.path.dirname(mlflow.projects._get_storage_dir(tmpdir.strpath)) == tmpdir.strpath assert os.path.dirname(mlflow.projects._get_storage_dir(None)) == tempfile.gettempdir() def test_parse_kubernetes_config(): work_dir = "./examples/docker" kubernetes_config = { "kube-context": "docker-for-desktop", "kube-job-template-path": os.path.join(work_dir, "kubernetes_job_template.yaml"), "repository-uri": "dockerhub_account/mlflow-kubernetes-example" } yaml_obj = None with open(kubernetes_config["kube-job-template-path"], 'r') as job_template: yaml_obj = yaml.safe_load(job_template.read()) kube_config = mlflow.projects._parse_kubernetes_config(kubernetes_config) assert kube_config["kube-context"] == kubernetes_config["kube-context"] assert kube_config["kube-job-template-path"] == kubernetes_config["kube-job-template-path"] assert kube_config["repository-uri"] == kubernetes_config["repository-uri"] assert kube_config["kube-job-template"] == yaml_obj def test_parse_kubernetes_config_without_context(): kubernetes_config = { "repository-uri": "dockerhub_account/mlflow-kubernetes-example", "kube-job-template-path": "kubernetes_job_template.yaml" } with pytest.raises(ExecutionException): mlflow.projects._parse_kubernetes_config(kubernetes_config) def test_parse_kubernetes_config_without_image_uri(): kubernetes_config = { "kube-context": "docker-for-desktop", "kube-job-template-path": "kubernetes_job_template.yaml" } with pytest.raises(ExecutionException): mlflow.projects._parse_kubernetes_config(kubernetes_config) def test_parse_kubernetes_config_invalid_template_job_file(): kubernetes_config = { "kube-context": "docker-for-desktop", "repository-uri": "username/mlflow-kubernetes-example", "kube-job-template-path": "file_not_found.yaml" } with pytest.raises(ExecutionException): mlflow.projects._parse_kubernetes_config(kubernetes_config)
[]
[]
[ "CONDA_EXE", "PATH" ]
[]
["CONDA_EXE", "PATH"]
python
2
0
providers/github.go
package providers import ( "encoding/json" "fmt" "net/http" "os" "github.com/philpearl/oauth2" "github.com/philpearl/oauth2/github" ) type GithubProvider struct { GenericProvider } type githubUser struct { Id int `json:"id"` Login string `json:"login"` Name string `json:"name"` Email string `json:"email"` } /* Github() creates a github oauth client. The client ID and client secret are taken from the environment variables GITHUB_CLIENT_ID & GITHUB_CLIENT_SECRET */ func Github(baseUrl string) Provider { config := &oauth2.Config{ ClientID: os.Getenv("GITHUB_CLIENT_ID"), ClientSecret: os.Getenv("GITHUB_CLIENT_SECRET"), Endpoint: github.Endpoint, RedirectURL: baseUrl + "callback/", Scopes: []string{"user:email"}, } return &GithubProvider{ GenericProvider: GenericProvider{ Name: "github", Config: config, }, } } func (p *GithubProvider) GetUserInfo(r *http.Request, client *http.Client, token *oauth2.Token) (map[string]interface{}, error) { req, err := http.NewRequest("GET", "https://api.github.com/user", nil) req.Header.Add("Accept", "application/vnd.github.v3+json") resp, err := client.Do(req) if err != nil { return nil, err } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { return nil, fmt.Errorf("github return error. %d %s", resp.StatusCode, resp.Status) } // fields we care about include id, login, name, email dec := json.NewDecoder(resp.Body) var user githubUser err = dec.Decode(&user) if err != nil { return nil, err } result := map[string]interface{}{ PROVIDER_EMAIL: user.Email, PROVIDER_USERNAME: user.Login, PROVIDER_ID: user.Id, PROVIDER_NAME: user.Name, } return result, nil }
[ "\"GITHUB_CLIENT_ID\"", "\"GITHUB_CLIENT_SECRET\"" ]
[]
[ "GITHUB_CLIENT_ID", "GITHUB_CLIENT_SECRET" ]
[]
["GITHUB_CLIENT_ID", "GITHUB_CLIENT_SECRET"]
go
2
0
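The GitHub provider above builds its oauth2.Config from GITHUB_CLIENT_ID and GITHUB_CLIENT_SECRET using a fork of the oauth2 package. A comparable sketch against the upstream golang.org/x/oauth2 packages, with an added presence check and a placeholder redirect URL (both are assumptions for illustration, not part of the provider):

package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/github"
)

func main() {
	clientID := os.Getenv("GITHUB_CLIENT_ID")
	clientSecret := os.Getenv("GITHUB_CLIENT_SECRET")
	if clientID == "" || clientSecret == "" {
		log.Fatal("GITHUB_CLIENT_ID and GITHUB_CLIENT_SECRET must be set")
	}

	config := &oauth2.Config{
		ClientID:     clientID,
		ClientSecret: clientSecret,
		Endpoint:     github.Endpoint,
		RedirectURL:  "https://example.com/callback/", // placeholder
		Scopes:       []string{"user:email"},
	}

	// The URL a user would be sent to in order to start the login flow.
	fmt.Println(config.AuthCodeURL("state-token"))
}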
mesonbuild/interpreter.py
# Copyright 2012-2018 The Meson development team # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from . import mparser from . import environment from . import coredata from . import dependencies from . import mlog from . import build from . import optinterpreter from . import compilers from .wrap import wrap, WrapMode from . import mesonlib from .mesonlib import FileMode, MachineChoice, Popen_safe, listify, extract_as_list, has_path_sep from .dependencies import ExternalProgram from .dependencies import InternalDependency, Dependency, NotFoundDependency, DependencyException from .depfile import DepFile from .interpreterbase import InterpreterBase from .interpreterbase import check_stringlist, flatten, noPosargs, noKwargs, stringArgs, permittedKwargs, noArgsFlattening from .interpreterbase import InterpreterException, InvalidArguments, InvalidCode, SubdirDoneRequest from .interpreterbase import InterpreterObject, MutableInterpreterObject, Disabler, disablerIfNotFound from .interpreterbase import FeatureNew, FeatureDeprecated, FeatureNewKwargs from .interpreterbase import ObjectHolder from .modules import ModuleReturnValue from .cmake import CMakeInterpreter from pathlib import Path, PurePath import os, shutil, uuid import re, shlex import subprocess from collections import namedtuple from itertools import chain import functools from typing import Sequence, List, Union, Optional, Dict, Any import importlib permitted_method_kwargs = { 'partial_dependency': {'compile_args', 'link_args', 'links', 'includes', 'sources'}, } def stringifyUserArguments(args): if isinstance(args, list): return '[%s]' % ', '.join([stringifyUserArguments(x) for x in args]) elif isinstance(args, dict): return '{%s}' % ', '.join(['%s : %s' % (stringifyUserArguments(k), stringifyUserArguments(v)) for k, v in args.items()]) elif isinstance(args, int): return str(args) elif isinstance(args, str): return "'%s'" % args raise InvalidArguments('Function accepts only strings, integers, lists and lists thereof.') class OverrideProgram(dependencies.ExternalProgram): pass class FeatureOptionHolder(InterpreterObject, ObjectHolder): def __init__(self, env, name, option): InterpreterObject.__init__(self) ObjectHolder.__init__(self, option) if option.is_auto(): self.held_object = env.coredata.builtins['auto_features'] self.name = name self.methods.update({'enabled': self.enabled_method, 'disabled': self.disabled_method, 'auto': self.auto_method, }) @noPosargs @permittedKwargs({}) def enabled_method(self, args, kwargs): return self.held_object.is_enabled() @noPosargs @permittedKwargs({}) def disabled_method(self, args, kwargs): return self.held_object.is_disabled() @noPosargs @permittedKwargs({}) def auto_method(self, args, kwargs): return self.held_object.is_auto() def extract_required_kwarg(kwargs, subproject, feature_check=None, default=True): val = kwargs.get('required', default) disabled = False required = False feature = None if isinstance(val, FeatureOptionHolder): if not feature_check: feature_check = FeatureNew('User option "feature"', '0.47.0') 
feature_check.use(subproject) option = val.held_object feature = val.name if option.is_disabled(): disabled = True elif option.is_enabled(): required = True elif isinstance(val, bool): required = val else: raise InterpreterException('required keyword argument must be boolean or a feature option') # Keep boolean value in kwargs to simplify other places where this kwarg is # checked. kwargs['required'] = required return disabled, required, feature class TryRunResultHolder(InterpreterObject): def __init__(self, res): super().__init__() self.res = res self.methods.update({'returncode': self.returncode_method, 'compiled': self.compiled_method, 'stdout': self.stdout_method, 'stderr': self.stderr_method, }) @noPosargs @permittedKwargs({}) def returncode_method(self, args, kwargs): return self.res.returncode @noPosargs @permittedKwargs({}) def compiled_method(self, args, kwargs): return self.res.compiled @noPosargs @permittedKwargs({}) def stdout_method(self, args, kwargs): return self.res.stdout @noPosargs @permittedKwargs({}) def stderr_method(self, args, kwargs): return self.res.stderr class RunProcess(InterpreterObject): def __init__(self, cmd, args, env, source_dir, build_dir, subdir, mesonintrospect, in_builddir=False, check=False, capture=True): super().__init__() if not isinstance(cmd, ExternalProgram): raise AssertionError('BUG: RunProcess must be passed an ExternalProgram') self.capture = capture pc, self.stdout, self.stderr = self.run_command(cmd, args, env, source_dir, build_dir, subdir, mesonintrospect, in_builddir, check) self.returncode = pc.returncode self.methods.update({'returncode': self.returncode_method, 'stdout': self.stdout_method, 'stderr': self.stderr_method, }) def run_command(self, cmd, args, env, source_dir, build_dir, subdir, mesonintrospect, in_builddir, check=False): command_array = cmd.get_command() + args menv = {'MESON_SOURCE_ROOT': source_dir, 'MESON_BUILD_ROOT': build_dir, 'MESON_SUBDIR': subdir, 'MESONINTROSPECT': ' '.join([shlex.quote(x) for x in mesonintrospect]), } if in_builddir: cwd = os.path.join(build_dir, subdir) else: cwd = os.path.join(source_dir, subdir) child_env = os.environ.copy() child_env.update(menv) child_env = env.get_env(child_env) stdout = subprocess.PIPE if self.capture else subprocess.DEVNULL mlog.debug('Running command:', ' '.join(command_array)) try: p, o, e = Popen_safe(command_array, stdout=stdout, env=child_env, cwd=cwd) if self.capture: mlog.debug('--- stdout ---') mlog.debug(o) else: o = '' mlog.debug('--- stdout disabled ---') mlog.debug('--- stderr ---') mlog.debug(e) mlog.debug('') if check and p.returncode != 0: raise InterpreterException('Command "{}" failed with status {}.'.format(' '.join(command_array), p.returncode)) return p, o, e except FileNotFoundError: raise InterpreterException('Could not execute command "%s".' 
% ' '.join(command_array)) @noPosargs @permittedKwargs({}) def returncode_method(self, args, kwargs): return self.returncode @noPosargs @permittedKwargs({}) def stdout_method(self, args, kwargs): return self.stdout @noPosargs @permittedKwargs({}) def stderr_method(self, args, kwargs): return self.stderr class ConfigureFileHolder(InterpreterObject, ObjectHolder): def __init__(self, subdir, sourcename, targetname, configuration_data): InterpreterObject.__init__(self) obj = build.ConfigureFile(subdir, sourcename, targetname, configuration_data) ObjectHolder.__init__(self, obj) class EnvironmentVariablesHolder(MutableInterpreterObject, ObjectHolder): def __init__(self, initial_values=None): MutableInterpreterObject.__init__(self) ObjectHolder.__init__(self, build.EnvironmentVariables()) self.methods.update({'set': self.set_method, 'append': self.append_method, 'prepend': self.prepend_method, }) if isinstance(initial_values, dict): for k, v in initial_values.items(): self.set_method([k, v], {}) elif isinstance(initial_values, list): for e in initial_values: if '=' not in e: raise InterpreterException('Env var definition must be of type key=val.') (k, val) = e.split('=', 1) k = k.strip() val = val.strip() if ' ' in k: raise InterpreterException('Env var key must not have spaces in it.') self.set_method([k, val], {}) elif initial_values: raise AssertionError('Unsupported EnvironmentVariablesHolder initial_values') def __repr__(self): repr_str = "<{0}: {1}>" return repr_str.format(self.__class__.__name__, self.held_object.envvars) def add_var(self, method, args, kwargs): if not isinstance(kwargs.get("separator", ""), str): raise InterpreterException("EnvironmentVariablesHolder methods 'separator'" " argument needs to be a string.") if len(args) < 2: raise InterpreterException("EnvironmentVariablesHolder methods require at least" "2 arguments, first is the name of the variable and" " following one are values") # Warn when someone tries to use append() or prepend() on an env var # which already has an operation set on it. People seem to think that # multiple append/prepend operations stack, but they don't. if method != self.held_object.set and self.held_object.has_name(args[0]): mlog.warning('Overriding previous value of environment variable {!r} with a new one' .format(args[0]), location=self.current_node) self.held_object.add_var(method, args[0], args[1:], kwargs) @stringArgs @permittedKwargs({'separator'}) def set_method(self, args, kwargs): self.add_var(self.held_object.set, args, kwargs) @stringArgs @permittedKwargs({'separator'}) def append_method(self, args, kwargs): self.add_var(self.held_object.append, args, kwargs) @stringArgs @permittedKwargs({'separator'}) def prepend_method(self, args, kwargs): self.add_var(self.held_object.prepend, args, kwargs) class ConfigurationDataHolder(MutableInterpreterObject, ObjectHolder): def __init__(self, pv, initial_values=None): MutableInterpreterObject.__init__(self) self.used = False # These objects become immutable after use in configure_file. 
ObjectHolder.__init__(self, build.ConfigurationData(), pv) self.methods.update({'set': self.set_method, 'set10': self.set10_method, 'set_quoted': self.set_quoted_method, 'has': self.has_method, 'get': self.get_method, 'get_unquoted': self.get_unquoted_method, 'merge_from': self.merge_from_method, }) if isinstance(initial_values, dict): for k, v in initial_values.items(): self.set_method([k, v], {}) elif initial_values: raise AssertionError('Unsupported ConfigurationDataHolder initial_values') def is_used(self): return self.used def mark_used(self): self.used = True def validate_args(self, args, kwargs): if len(args) == 1 and isinstance(args[0], list) and len(args[0]) == 2: mlog.deprecation('Passing a list as the single argument to ' 'configuration_data.set is deprecated. This will ' 'become a hard error in the future.', location=self.current_node) args = args[0] if len(args) != 2: raise InterpreterException("Configuration set requires 2 arguments.") if self.used: raise InterpreterException("Can not set values on configuration object that has been used.") name, val = args if not isinstance(val, (int, str)): msg = 'Setting a configuration data value to {!r} is invalid, ' \ 'and will fail at configure_file(). If you are using it ' \ 'just to store some values, please use a dict instead.' mlog.deprecation(msg.format(val), location=self.current_node) desc = kwargs.get('description', None) if not isinstance(name, str): raise InterpreterException("First argument to set must be a string.") if desc is not None and not isinstance(desc, str): raise InterpreterException('Description must be a string.') return name, val, desc @noArgsFlattening def set_method(self, args, kwargs): (name, val, desc) = self.validate_args(args, kwargs) self.held_object.values[name] = (val, desc) def set_quoted_method(self, args, kwargs): (name, val, desc) = self.validate_args(args, kwargs) if not isinstance(val, str): raise InterpreterException("Second argument to set_quoted must be a string.") escaped_val = '\\"'.join(val.split('"')) escaped_val = '\\n'.join(escaped_val.split('\n')) self.held_object.values[name] = ('"' + escaped_val + '"', desc) def set10_method(self, args, kwargs): (name, val, desc) = self.validate_args(args, kwargs) if val: self.held_object.values[name] = (1, desc) else: self.held_object.values[name] = (0, desc) def has_method(self, args, kwargs): return args[0] in self.held_object.values @FeatureNew('configuration_data.get()', '0.38.0') @noArgsFlattening def get_method(self, args, kwargs): if len(args) < 1 or len(args) > 2: raise InterpreterException('Get method takes one or two arguments.') name = args[0] if name in self.held_object: return self.held_object.get(name)[0] if len(args) > 1: return args[1] raise InterpreterException('Entry %s not in configuration data.' % name) @FeatureNew('configuration_data.get_unquoted()', '0.44.0') def get_unquoted_method(self, args, kwargs): if len(args) < 1 or len(args) > 2: raise InterpreterException('Get method takes one or two arguments.') name = args[0] if name in self.held_object: val = self.held_object.get(name)[0] elif len(args) > 1: val = args[1] else: raise InterpreterException('Entry %s not in configuration data.' 
% name) if val[0] == '"' and val[-1] == '"': return val[1:-1] return val def get(self, name): return self.held_object.values[name] # (val, desc) def keys(self): return self.held_object.values.keys() def merge_from_method(self, args, kwargs): if len(args) != 1: raise InterpreterException('Merge_from takes one positional argument.') from_object = args[0] if not isinstance(from_object, ConfigurationDataHolder): raise InterpreterException('Merge_from argument must be a configuration data object.') from_object = from_object.held_object for k, v in from_object.values.items(): self.held_object.values[k] = v # Interpreter objects can not be pickled so we must have # these wrappers. class DependencyHolder(InterpreterObject, ObjectHolder): def __init__(self, dep, pv): InterpreterObject.__init__(self) ObjectHolder.__init__(self, dep, pv) self.methods.update({'found': self.found_method, 'type_name': self.type_name_method, 'version': self.version_method, 'name': self.name_method, 'get_pkgconfig_variable': self.pkgconfig_method, 'get_configtool_variable': self.configtool_method, 'get_variable': self.variable_method, 'partial_dependency': self.partial_dependency_method, 'include_type': self.include_type_method, 'as_system': self.as_system_method, }) def found(self): return self.found_method([], {}) @noPosargs @permittedKwargs({}) def type_name_method(self, args, kwargs): return self.held_object.type_name @noPosargs @permittedKwargs({}) def found_method(self, args, kwargs): if self.held_object.type_name == 'internal': return True return self.held_object.found() @noPosargs @permittedKwargs({}) def version_method(self, args, kwargs): return self.held_object.get_version() @noPosargs @permittedKwargs({}) def name_method(self, args, kwargs): return self.held_object.get_name() @permittedKwargs({'define_variable', 'default'}) def pkgconfig_method(self, args, kwargs): args = listify(args) if len(args) != 1: raise InterpreterException('get_pkgconfig_variable takes exactly one argument.') varname = args[0] if not isinstance(varname, str): raise InterpreterException('Variable name must be a string.') return self.held_object.get_pkgconfig_variable(varname, kwargs) @FeatureNew('dep.get_configtool_variable', '0.44.0') @permittedKwargs({}) def configtool_method(self, args, kwargs): args = listify(args) if len(args) != 1: raise InterpreterException('get_configtool_variable takes exactly one argument.') varname = args[0] if not isinstance(varname, str): raise InterpreterException('Variable name must be a string.') return self.held_object.get_configtool_variable(varname) @FeatureNew('dep.partial_dependency', '0.46.0') @noPosargs @permittedKwargs(permitted_method_kwargs['partial_dependency']) def partial_dependency_method(self, args, kwargs): pdep = self.held_object.get_partial_dependency(**kwargs) return DependencyHolder(pdep, self.subproject) @FeatureNew('dep.get_variable', '0.51.0') @noPosargs @permittedKwargs({'cmake', 'pkgconfig', 'configtool', 'default_value', 'pkgconfig_define'}) def variable_method(self, args, kwargs): return self.held_object.get_variable(**kwargs) @FeatureNew('dep.include_type', '0.52.0') @noPosargs @permittedKwargs({}) def include_type_method(self, args, kwargs): return self.held_object.get_include_type() @FeatureNew('dep.as_system', '0.52.0') @permittedKwargs({}) def as_system_method(self, args, kwargs): args = listify(args) new_is_system = 'system' if len(args) > 1: raise InterpreterException('as_system takes only one optional value') if len(args) == 1: new_is_system = args[0] new_dep = 
self.held_object.generate_system_dependency(new_is_system) return DependencyHolder(new_dep, self.subproject) class ExternalProgramHolder(InterpreterObject, ObjectHolder): def __init__(self, ep): InterpreterObject.__init__(self) ObjectHolder.__init__(self, ep) self.methods.update({'found': self.found_method, 'path': self.path_method}) self.cached_version = None @noPosargs @permittedKwargs({}) def found_method(self, args, kwargs): return self.found() @noPosargs @permittedKwargs({}) def path_method(self, args, kwargs): return self.held_object.get_path() def found(self): return isinstance(self.held_object, build.Executable) or self.held_object.found() def get_command(self): return self.held_object.get_command() def get_name(self): return self.held_object.get_name() def get_version(self, interpreter): if not self.cached_version: raw_cmd = self.get_command() + ['--version'] cmd = [self, '--version'] res = interpreter.run_command_impl(interpreter.current_node, cmd, {}, True) if res.returncode != 0: m = 'Running {!r} failed' raise InterpreterException(m.format(raw_cmd)) output = res.stdout.strip() if not output: output = res.stderr.strip() match = re.search(r'([0-9\.]+)', output) if not match: m = 'Could not find a version number in output of {!r}' raise InterpreterException(m.format(raw_cmd)) self.cached_version = match.group(1) return self.cached_version class ExternalLibraryHolder(InterpreterObject, ObjectHolder): def __init__(self, el, pv): InterpreterObject.__init__(self) ObjectHolder.__init__(self, el, pv) self.methods.update({'found': self.found_method, 'type_name': self.type_name_method, 'partial_dependency': self.partial_dependency_method, }) def found(self): return self.held_object.found() @noPosargs @permittedKwargs({}) def type_name_method(self, args, kwargs): return self.held_object.type_name @noPosargs @permittedKwargs({}) def found_method(self, args, kwargs): return self.found() def get_name(self): return self.held_object.name def get_compile_args(self): return self.held_object.get_compile_args() def get_link_args(self): return self.held_object.get_link_args() def get_exe_args(self): return self.held_object.get_exe_args() @FeatureNew('dep.partial_dependency', '0.46.0') @noPosargs @permittedKwargs(permitted_method_kwargs['partial_dependency']) def partial_dependency_method(self, args, kwargs): pdep = self.held_object.get_partial_dependency(**kwargs) return DependencyHolder(pdep, self.subproject) class GeneratorHolder(InterpreterObject, ObjectHolder): @FeatureNewKwargs('generator', '0.43.0', ['capture']) def __init__(self, interp, args, kwargs): self.interpreter = interp InterpreterObject.__init__(self) ObjectHolder.__init__(self, build.Generator(args, kwargs), interp.subproject) self.methods.update({'process': self.process_method}) @FeatureNewKwargs('generator.process', '0.45.0', ['preserve_path_from']) @permittedKwargs({'extra_args', 'preserve_path_from'}) def process_method(self, args, kwargs): extras = mesonlib.stringlistify(kwargs.get('extra_args', [])) if 'preserve_path_from' in kwargs: preserve_path_from = kwargs['preserve_path_from'] if not isinstance(preserve_path_from, str): raise InvalidArguments('Preserve_path_from must be a string.') preserve_path_from = os.path.normpath(preserve_path_from) if not os.path.isabs(preserve_path_from): # This is a bit of a hack. Fix properly before merging. raise InvalidArguments('Preserve_path_from must be an absolute path for now. 
Sorry.')
        else:
            preserve_path_from = None
        gl = self.held_object.process_files('Generator', args, self.interpreter,
                                             preserve_path_from, extra_args=extras)
        return GeneratedListHolder(gl)


class GeneratedListHolder(InterpreterObject, ObjectHolder):
    def __init__(self, arg1, extra_args=None):
        InterpreterObject.__init__(self)
        if isinstance(arg1, GeneratorHolder):
            ObjectHolder.__init__(self, build.GeneratedList(arg1.held_object, extra_args if extra_args is not None else []))
        else:
            ObjectHolder.__init__(self, arg1)

    def __repr__(self):
        r = '<{}: {!r}>'
        return r.format(self.__class__.__name__, self.held_object.get_outputs())

    def add_file(self, a):
        self.held_object.add_file(a)

# A machine that's statically known from the cross file
class MachineHolder(InterpreterObject, ObjectHolder):
    def __init__(self, machine_info):
        InterpreterObject.__init__(self)
        ObjectHolder.__init__(self, machine_info)
        self.methods.update({'system': self.system_method,
                             'cpu': self.cpu_method,
                             'cpu_family': self.cpu_family_method,
                             'endian': self.endian_method,
                             })

    @noPosargs
    @permittedKwargs({})
    def cpu_family_method(self, args, kwargs):
        return self.held_object.cpu_family

    @noPosargs
    @permittedKwargs({})
    def cpu_method(self, args, kwargs):
        return self.held_object.cpu

    @noPosargs
    @permittedKwargs({})
    def system_method(self, args, kwargs):
        return self.held_object.system

    @noPosargs
    @permittedKwargs({})
    def endian_method(self, args, kwargs):
        return self.held_object.endian

class IncludeDirsHolder(InterpreterObject, ObjectHolder):
    def __init__(self, idobj):
        InterpreterObject.__init__(self)
        ObjectHolder.__init__(self, idobj)

class Headers(InterpreterObject):

    def __init__(self, sources, kwargs):
        InterpreterObject.__init__(self)
        self.sources = sources
        self.install_subdir = kwargs.get('subdir', '')
        if os.path.isabs(self.install_subdir):
            mlog.deprecation('Subdir keyword must not be an absolute path. 
This will be a hard error in the next release.') self.custom_install_dir = kwargs.get('install_dir', None) self.custom_install_mode = kwargs.get('install_mode', None) if self.custom_install_dir is not None: if not isinstance(self.custom_install_dir, str): raise InterpreterException('Custom_install_dir must be a string.') def set_install_subdir(self, subdir): self.install_subdir = subdir def get_install_subdir(self): return self.install_subdir def get_sources(self): return self.sources def get_custom_install_dir(self): return self.custom_install_dir def get_custom_install_mode(self): return self.custom_install_mode class DataHolder(InterpreterObject, ObjectHolder): def __init__(self, data): InterpreterObject.__init__(self) ObjectHolder.__init__(self, data) def get_source_subdir(self): return self.held_object.source_subdir def get_sources(self): return self.held_object.sources def get_install_dir(self): return self.held_object.install_dir class InstallDir(InterpreterObject): def __init__(self, src_subdir, inst_subdir, install_dir, install_mode, exclude, strip_directory): InterpreterObject.__init__(self) self.source_subdir = src_subdir self.installable_subdir = inst_subdir self.install_dir = install_dir self.install_mode = install_mode self.exclude = exclude self.strip_directory = strip_directory class Man(InterpreterObject): def __init__(self, sources, kwargs): InterpreterObject.__init__(self) self.sources = sources self.validate_sources() self.custom_install_dir = kwargs.get('install_dir', None) self.custom_install_mode = kwargs.get('install_mode', None) if self.custom_install_dir is not None and not isinstance(self.custom_install_dir, str): raise InterpreterException('Custom_install_dir must be a string.') def validate_sources(self): for s in self.sources: try: num = int(s.split('.')[-1]) except (IndexError, ValueError): num = 0 if num < 1 or num > 8: raise InvalidArguments('Man file must have a file extension of a number between 1 and 8') def get_custom_install_dir(self): return self.custom_install_dir def get_custom_install_mode(self): return self.custom_install_mode def get_sources(self): return self.sources class GeneratedObjectsHolder(InterpreterObject, ObjectHolder): def __init__(self, held_object): InterpreterObject.__init__(self) ObjectHolder.__init__(self, held_object) class TargetHolder(InterpreterObject, ObjectHolder): def __init__(self, target, interp): InterpreterObject.__init__(self) ObjectHolder.__init__(self, target, interp.subproject) self.interpreter = interp class BuildTargetHolder(TargetHolder): def __init__(self, target, interp): super().__init__(target, interp) self.methods.update({'extract_objects': self.extract_objects_method, 'extract_all_objects': self.extract_all_objects_method, 'get_id': self.get_id_method, 'outdir': self.outdir_method, 'full_path': self.full_path_method, 'private_dir_include': self.private_dir_include_method, }) def __repr__(self): r = '<{} {}: {}>' h = self.held_object return r.format(self.__class__.__name__, h.get_id(), h.filename) def is_cross(self): return not self.held_object.environment.machines.matches_build_machine(self.held_object.for_machine) @noPosargs @permittedKwargs({}) def private_dir_include_method(self, args, kwargs): return IncludeDirsHolder(build.IncludeDirs('', [], False, [self.interpreter.backend.get_target_private_dir(self.held_object)])) @noPosargs @permittedKwargs({}) def full_path_method(self, args, kwargs): return self.interpreter.backend.get_target_filename_abs(self.held_object) @noPosargs @permittedKwargs({}) def 
outdir_method(self, args, kwargs): return self.interpreter.backend.get_target_dir(self.held_object) @permittedKwargs({}) def extract_objects_method(self, args, kwargs): gobjs = self.held_object.extract_objects(args) return GeneratedObjectsHolder(gobjs) @FeatureNewKwargs('extract_all_objects', '0.46.0', ['recursive']) @noPosargs @permittedKwargs({'recursive'}) def extract_all_objects_method(self, args, kwargs): recursive = kwargs.get('recursive', False) gobjs = self.held_object.extract_all_objects(recursive) if gobjs.objlist and 'recursive' not in kwargs: mlog.warning('extract_all_objects called without setting recursive ' 'keyword argument. Meson currently defaults to ' 'non-recursive to maintain backward compatibility but ' 'the default will be changed in the future.', location=self.current_node) return GeneratedObjectsHolder(gobjs) @noPosargs @permittedKwargs({}) def get_id_method(self, args, kwargs): return self.held_object.get_id() class ExecutableHolder(BuildTargetHolder): def __init__(self, target, interp): super().__init__(target, interp) class StaticLibraryHolder(BuildTargetHolder): def __init__(self, target, interp): super().__init__(target, interp) class SharedLibraryHolder(BuildTargetHolder): def __init__(self, target, interp): super().__init__(target, interp) # Set to True only when called from self.func_shared_lib(). target.shared_library_only = False class BothLibrariesHolder(BuildTargetHolder): def __init__(self, shared_holder, static_holder, interp): # FIXME: This build target always represents the shared library, but # that should be configurable. super().__init__(shared_holder.held_object, interp) self.shared_holder = shared_holder self.static_holder = static_holder self.methods.update({'get_shared_lib': self.get_shared_lib_method, 'get_static_lib': self.get_static_lib_method, }) def __repr__(self): r = '<{} {}: {}, {}: {}>' h1 = self.shared_holder.held_object h2 = self.static_holder.held_object return r.format(self.__class__.__name__, h1.get_id(), h1.filename, h2.get_id(), h2.filename) @noPosargs @permittedKwargs({}) def get_shared_lib_method(self, args, kwargs): return self.shared_holder @noPosargs @permittedKwargs({}) def get_static_lib_method(self, args, kwargs): return self.static_holder class SharedModuleHolder(BuildTargetHolder): def __init__(self, target, interp): super().__init__(target, interp) class JarHolder(BuildTargetHolder): def __init__(self, target, interp): super().__init__(target, interp) class CustomTargetIndexHolder(InterpreterObject, ObjectHolder): def __init__(self, object_to_hold): InterpreterObject.__init__(self) ObjectHolder.__init__(self, object_to_hold) class CustomTargetHolder(TargetHolder): def __init__(self, target, interp): super().__init__(target, interp) self.methods.update({'full_path': self.full_path_method, }) def __repr__(self): r = '<{} {}: {}>' h = self.held_object return r.format(self.__class__.__name__, h.get_id(), h.command) @noPosargs @permittedKwargs({}) def full_path_method(self, args, kwargs): return self.interpreter.backend.get_target_filename_abs(self.held_object) def __getitem__(self, index): return CustomTargetIndexHolder(self.held_object[index]) def __setitem__(self, index, value): raise InterpreterException('Cannot set a member of a CustomTarget') def __delitem__(self, index): raise InterpreterException('Cannot delete a member of a CustomTarget') def outdir_include(self): return IncludeDirsHolder(build.IncludeDirs('', [], False, [os.path.join('@BUILD_ROOT@', self.interpreter.backend.get_target_dir(self.held_object))])) 
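# Illustrative sketch (not part of upstream Meson): every *Holder class in this
# file follows the same wrapper pattern -- InterpreterObject supplies the
# `methods` dispatch table consulted when a build script calls a method on the
# object, and ObjectHolder keeps the wrapped build-level object in
# `held_object`. The hypothetical ExampleHolder below shows the pattern in
# miniature; it is commented out so module behaviour is unchanged.
#
# class ExampleHolder(InterpreterObject, ObjectHolder):
#     def __init__(self, thing):
#         InterpreterObject.__init__(self)
#         ObjectHolder.__init__(self, thing)
#         self.methods.update({'describe': self.describe_method})
#
#     @noPosargs
#     @permittedKwargs({})
#     def describe_method(self, args, kwargs):
#         # Return a plain string; values returned from *_method functions are
#         # handed back to the build script by the interpreter.
#         return repr(self.held_object)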
class RunTargetHolder(TargetHolder): def __init__(self, target, interp): super().__init__(target, interp) def __repr__(self): r = '<{} {}: {}>' h = self.held_object return r.format(self.__class__.__name__, h.get_id(), h.command) class Test(InterpreterObject): def __init__(self, name: str, project: str, suite: List[str], exe: build.Executable, depends: List[Union[build.CustomTarget, build.BuildTarget]], is_parallel: bool, cmd_args: List[str], env: build.EnvironmentVariables, should_fail: bool, timeout: int, workdir: Optional[str], protocol: str, priority: int): InterpreterObject.__init__(self) self.name = name self.suite = suite self.project_name = project self.exe = exe self.depends = depends self.is_parallel = is_parallel self.cmd_args = cmd_args self.env = env self.should_fail = should_fail self.timeout = timeout self.workdir = workdir self.protocol = protocol self.priority = priority def get_exe(self): return self.exe def get_name(self): return self.name class SubprojectHolder(InterpreterObject, ObjectHolder): def __init__(self, subinterpreter, subproject_dir, name): InterpreterObject.__init__(self) ObjectHolder.__init__(self, subinterpreter) self.name = name self.subproject_dir = subproject_dir self.methods.update({'get_variable': self.get_variable_method, 'found': self.found_method, }) @noPosargs @permittedKwargs({}) def found_method(self, args, kwargs): return self.found() def found(self): return self.held_object is not None @permittedKwargs({}) @noArgsFlattening def get_variable_method(self, args, kwargs): if len(args) < 1 or len(args) > 2: raise InterpreterException('Get_variable takes one or two arguments.') if not self.found(): raise InterpreterException('Subproject "%s/%s" disabled can\'t get_variable on it.' % ( self.subproject_dir, self.name)) varname = args[0] if not isinstance(varname, str): raise InterpreterException('Get_variable first argument must be a string.') try: return self.held_object.variables[varname] except KeyError: pass if len(args) == 2: return args[1] raise InvalidArguments('Requested variable "{0}" not found.'.format(varname)) header_permitted_kwargs = set([ 'required', 'prefix', 'no_builtin_args', 'include_directories', 'args', 'dependencies', ]) find_library_permitted_kwargs = set([ 'has_headers', 'required', 'dirs', 'static', ]) find_library_permitted_kwargs |= set(['header_' + k for k in header_permitted_kwargs]) class CompilerHolder(InterpreterObject): def __init__(self, compiler, env, subproject): InterpreterObject.__init__(self) self.compiler = compiler self.environment = env self.subproject = subproject self.methods.update({'compiles': self.compiles_method, 'links': self.links_method, 'get_id': self.get_id_method, 'compute_int': self.compute_int_method, 'sizeof': self.sizeof_method, 'get_define': self.get_define_method, 'check_header': self.check_header_method, 'has_header': self.has_header_method, 'has_header_symbol': self.has_header_symbol_method, 'run': self.run_method, 'has_function': self.has_function_method, 'has_member': self.has_member_method, 'has_members': self.has_members_method, 'has_type': self.has_type_method, 'alignment': self.alignment_method, 'version': self.version_method, 'cmd_array': self.cmd_array_method, 'find_library': self.find_library_method, 'has_argument': self.has_argument_method, 'has_function_attribute': self.has_func_attribute_method, 'get_supported_function_attributes': self.get_supported_function_attributes_method, 'has_multi_arguments': self.has_multi_arguments_method, 'get_supported_arguments': 
self.get_supported_arguments_method, 'first_supported_argument': self.first_supported_argument_method, 'has_link_argument': self.has_link_argument_method, 'has_multi_link_arguments': self.has_multi_link_arguments_method, 'get_supported_link_arguments': self.get_supported_link_arguments_method, 'first_supported_link_argument': self.first_supported_link_argument_method, 'unittest_args': self.unittest_args_method, 'symbols_have_underscore_prefix': self.symbols_have_underscore_prefix_method, 'get_argument_syntax': self.get_argument_syntax_method, }) def _dep_msg(self, deps, endl): msg_single = 'with dependency {}' msg_many = 'with dependencies {}' if not deps: return endl if endl is None: endl = '' tpl = msg_many if len(deps) > 1 else msg_single names = [] for d in deps: if isinstance(d, dependencies.ExternalLibrary): name = '-l' + d.name else: name = d.name names.append(name) return tpl.format(', '.join(names)) + endl @noPosargs @permittedKwargs({}) def version_method(self, args, kwargs): return self.compiler.version @noPosargs @permittedKwargs({}) def cmd_array_method(self, args, kwargs): return self.compiler.exelist def determine_args(self, kwargs, mode='link'): nobuiltins = kwargs.get('no_builtin_args', False) if not isinstance(nobuiltins, bool): raise InterpreterException('Type of no_builtin_args not a boolean.') args = [] incdirs = extract_as_list(kwargs, 'include_directories') for i in incdirs: if not isinstance(i, IncludeDirsHolder): raise InterpreterException('Include directories argument must be an include_directories object.') for idir in i.held_object.get_incdirs(): idir = os.path.join(self.environment.get_source_dir(), i.held_object.get_curdir(), idir) args += self.compiler.get_include_args(idir, False) if not nobuiltins: for_machine = Interpreter.machine_from_native_kwarg(kwargs) opts = self.environment.coredata.compiler_options[for_machine] args += self.compiler.get_option_compile_args(opts) if mode == 'link': args += self.compiler.get_option_link_args(opts) args += mesonlib.stringlistify(kwargs.get('args', [])) return args def determine_dependencies(self, kwargs, endl=':'): deps = kwargs.get('dependencies', None) if deps is not None: deps = listify(deps) final_deps = [] for d in deps: try: d = d.held_object except Exception: pass if isinstance(d, InternalDependency) or not isinstance(d, Dependency): raise InterpreterException('Dependencies must be external dependencies') final_deps.append(d) deps = final_deps return deps, self._dep_msg(deps, endl) @permittedKwargs({ 'prefix', 'args', 'dependencies', }) def alignment_method(self, args, kwargs): if len(args) != 1: raise InterpreterException('Alignment method takes exactly one positional argument.') check_stringlist(args) typename = args[0] prefix = kwargs.get('prefix', '') if not isinstance(prefix, str): raise InterpreterException('Prefix argument of alignment must be a string.') extra_args = mesonlib.stringlistify(kwargs.get('args', [])) deps, msg = self.determine_dependencies(kwargs) result = self.compiler.alignment(typename, prefix, self.environment, extra_args=extra_args, dependencies=deps) mlog.log('Checking for alignment of', mlog.bold(typename, True), msg, result) return result @permittedKwargs({ 'name', 'no_builtin_args', 'include_directories', 'args', 'dependencies', }) def run_method(self, args, kwargs): if len(args) != 1: raise InterpreterException('Run method takes exactly one positional argument.') code = args[0] if isinstance(code, mesonlib.File): code = mesonlib.File.from_absolute_file( 
code.rel_to_builddir(self.environment.source_dir)) elif not isinstance(code, str): raise InvalidArguments('Argument must be string or file.') testname = kwargs.get('name', '') if not isinstance(testname, str): raise InterpreterException('Testname argument must be a string.') extra_args = functools.partial(self.determine_args, kwargs) deps, msg = self.determine_dependencies(kwargs, endl=None) result = self.compiler.run(code, self.environment, extra_args=extra_args, dependencies=deps) if len(testname) > 0: if not result.compiled: h = mlog.red('DID NOT COMPILE') elif result.returncode == 0: h = mlog.green('YES') else: h = mlog.red('NO (%d)' % result.returncode) mlog.log('Checking if', mlog.bold(testname, True), msg, 'runs:', h) return TryRunResultHolder(result) @noPosargs @permittedKwargs({}) def get_id_method(self, args, kwargs): return self.compiler.get_id() @noPosargs @permittedKwargs({}) def symbols_have_underscore_prefix_method(self, args, kwargs): ''' Check if the compiler prefixes _ (underscore) to global C symbols See: https://en.wikipedia.org/wiki/Name_mangling#C ''' return self.compiler.symbols_have_underscore_prefix(self.environment) @noPosargs @permittedKwargs({}) def unittest_args_method(self, args, kwargs): ''' This function is deprecated and should not be used. It can be removed in a future version of Meson. ''' if not hasattr(self.compiler, 'get_feature_args'): raise InterpreterException('This {} compiler has no feature arguments.'.format(self.compiler.get_display_language())) build_to_src = os.path.relpath(self.environment.get_source_dir(), self.environment.get_build_dir()) return self.compiler.get_feature_args({'unittest': 'true'}, build_to_src) @permittedKwargs({ 'prefix', 'no_builtin_args', 'include_directories', 'args', 'dependencies', }) def has_member_method(self, args, kwargs): if len(args) != 2: raise InterpreterException('Has_member takes exactly two arguments.') check_stringlist(args) typename, membername = args prefix = kwargs.get('prefix', '') if not isinstance(prefix, str): raise InterpreterException('Prefix argument of has_member must be a string.') extra_args = functools.partial(self.determine_args, kwargs) deps, msg = self.determine_dependencies(kwargs) had, cached = self.compiler.has_members(typename, [membername], prefix, self.environment, extra_args=extra_args, dependencies=deps) cached = mlog.blue('(cached)') if cached else '' if had: hadtxt = mlog.green('YES') else: hadtxt = mlog.red('NO') mlog.log('Checking whether type', mlog.bold(typename, True), 'has member', mlog.bold(membername, True), msg, hadtxt, cached) return had @permittedKwargs({ 'prefix', 'no_builtin_args', 'include_directories', 'args', 'dependencies', }) def has_members_method(self, args, kwargs): if len(args) < 2: raise InterpreterException('Has_members needs at least two arguments.') check_stringlist(args) typename, *membernames = args prefix = kwargs.get('prefix', '') if not isinstance(prefix, str): raise InterpreterException('Prefix argument of has_members must be a string.') extra_args = functools.partial(self.determine_args, kwargs) deps, msg = self.determine_dependencies(kwargs) had, cached = self.compiler.has_members(typename, membernames, prefix, self.environment, extra_args=extra_args, dependencies=deps) cached = mlog.blue('(cached)') if cached else '' if had: hadtxt = mlog.green('YES') else: hadtxt = mlog.red('NO') members = mlog.bold(', '.join(['"{}"'.format(m) for m in membernames])) mlog.log('Checking whether type', mlog.bold(typename, True), 'has members', members, msg, 
hadtxt, cached) return had @permittedKwargs({ 'prefix', 'no_builtin_args', 'include_directories', 'args', 'dependencies', }) def has_function_method(self, args, kwargs): if len(args) != 1: raise InterpreterException('Has_function takes exactly one argument.') check_stringlist(args) funcname = args[0] prefix = kwargs.get('prefix', '') if not isinstance(prefix, str): raise InterpreterException('Prefix argument of has_function must be a string.') extra_args = self.determine_args(kwargs) deps, msg = self.determine_dependencies(kwargs) had, cached = self.compiler.has_function(funcname, prefix, self.environment, extra_args=extra_args, dependencies=deps) cached = mlog.blue('(cached)') if cached else '' if had: hadtxt = mlog.green('YES') else: hadtxt = mlog.red('NO') mlog.log('Checking for function', mlog.bold(funcname, True), msg, hadtxt, cached) return had @permittedKwargs({ 'prefix', 'no_builtin_args', 'include_directories', 'args', 'dependencies', }) def has_type_method(self, args, kwargs): if len(args) != 1: raise InterpreterException('Has_type takes exactly one argument.') check_stringlist(args) typename = args[0] prefix = kwargs.get('prefix', '') if not isinstance(prefix, str): raise InterpreterException('Prefix argument of has_type must be a string.') extra_args = functools.partial(self.determine_args, kwargs) deps, msg = self.determine_dependencies(kwargs) had, cached = self.compiler.has_type(typename, prefix, self.environment, extra_args=extra_args, dependencies=deps) cached = mlog.blue('(cached)') if cached else '' if had: hadtxt = mlog.green('YES') else: hadtxt = mlog.red('NO') mlog.log('Checking for type', mlog.bold(typename, True), msg, hadtxt, cached) return had @FeatureNew('compiler.compute_int', '0.40.0') @permittedKwargs({ 'prefix', 'low', 'high', 'guess', 'no_builtin_args', 'include_directories', 'args', 'dependencies', }) def compute_int_method(self, args, kwargs): if len(args) != 1: raise InterpreterException('Compute_int takes exactly one argument.') check_stringlist(args) expression = args[0] prefix = kwargs.get('prefix', '') low = kwargs.get('low', None) high = kwargs.get('high', None) guess = kwargs.get('guess', None) if not isinstance(prefix, str): raise InterpreterException('Prefix argument of compute_int must be a string.') if low is not None and not isinstance(low, int): raise InterpreterException('Low argument of compute_int must be an int.') if high is not None and not isinstance(high, int): raise InterpreterException('High argument of compute_int must be an int.') if guess is not None and not isinstance(guess, int): raise InterpreterException('Guess argument of compute_int must be an int.') extra_args = functools.partial(self.determine_args, kwargs) deps, msg = self.determine_dependencies(kwargs) res = self.compiler.compute_int(expression, low, high, guess, prefix, self.environment, extra_args=extra_args, dependencies=deps) mlog.log('Computing int of', mlog.bold(expression, True), msg, res) return res @permittedKwargs({ 'prefix', 'no_builtin_args', 'include_directories', 'args', 'dependencies', }) def sizeof_method(self, args, kwargs): if len(args) != 1: raise InterpreterException('Sizeof takes exactly one argument.') check_stringlist(args) element = args[0] prefix = kwargs.get('prefix', '') if not isinstance(prefix, str): raise InterpreterException('Prefix argument of sizeof must be a string.') extra_args = functools.partial(self.determine_args, kwargs) deps, msg = self.determine_dependencies(kwargs) esize = self.compiler.sizeof(element, prefix, self.environment, 
extra_args=extra_args, dependencies=deps) mlog.log('Checking for size of', mlog.bold(element, True), msg, esize) return esize @FeatureNew('compiler.get_define', '0.40.0') @permittedKwargs({ 'prefix', 'no_builtin_args', 'include_directories', 'args', 'dependencies', }) def get_define_method(self, args, kwargs): if len(args) != 1: raise InterpreterException('get_define() takes exactly one argument.') check_stringlist(args) element = args[0] prefix = kwargs.get('prefix', '') if not isinstance(prefix, str): raise InterpreterException('Prefix argument of get_define() must be a string.') extra_args = functools.partial(self.determine_args, kwargs) deps, msg = self.determine_dependencies(kwargs) value, cached = self.compiler.get_define(element, prefix, self.environment, extra_args=extra_args, dependencies=deps) cached = mlog.blue('(cached)') if cached else '' mlog.log('Fetching value of define', mlog.bold(element, True), msg, value, cached) return value @permittedKwargs({ 'name', 'no_builtin_args', 'include_directories', 'args', 'dependencies', }) def compiles_method(self, args, kwargs): if len(args) != 1: raise InterpreterException('compiles method takes exactly one argument.') code = args[0] if isinstance(code, mesonlib.File): code = mesonlib.File.from_absolute_file( code.rel_to_builddir(self.environment.source_dir)) elif not isinstance(code, str): raise InvalidArguments('Argument must be string or file.') testname = kwargs.get('name', '') if not isinstance(testname, str): raise InterpreterException('Testname argument must be a string.') extra_args = functools.partial(self.determine_args, kwargs) deps, msg = self.determine_dependencies(kwargs, endl=None) result, cached = self.compiler.compiles(code, self.environment, extra_args=extra_args, dependencies=deps) if len(testname) > 0: if result: h = mlog.green('YES') else: h = mlog.red('NO') cached = mlog.blue('(cached)') if cached else '' mlog.log('Checking if', mlog.bold(testname, True), msg, 'compiles:', h, cached) return result @permittedKwargs({ 'name', 'no_builtin_args', 'include_directories', 'args', 'dependencies', }) def links_method(self, args, kwargs): if len(args) != 1: raise InterpreterException('links method takes exactly one argument.') code = args[0] if isinstance(code, mesonlib.File): code = mesonlib.File.from_absolute_file( code.rel_to_builddir(self.environment.source_dir)) elif not isinstance(code, str): raise InvalidArguments('Argument must be string or file.') testname = kwargs.get('name', '') if not isinstance(testname, str): raise InterpreterException('Testname argument must be a string.') extra_args = functools.partial(self.determine_args, kwargs) deps, msg = self.determine_dependencies(kwargs, endl=None) result, cached = self.compiler.links(code, self.environment, extra_args=extra_args, dependencies=deps) cached = mlog.blue('(cached)') if cached else '' if len(testname) > 0: if result: h = mlog.green('YES') else: h = mlog.red('NO') mlog.log('Checking if', mlog.bold(testname, True), msg, 'links:', h, cached) return result @FeatureNew('compiler.check_header', '0.47.0') @FeatureNewKwargs('compiler.check_header', '0.50.0', ['required']) @permittedKwargs(header_permitted_kwargs) def check_header_method(self, args, kwargs): if len(args) != 1: raise InterpreterException('check_header method takes exactly one argument.') check_stringlist(args) hname = args[0] prefix = kwargs.get('prefix', '') if not isinstance(prefix, str): raise InterpreterException('Prefix argument of has_header must be a string.') disabled, required, feature = 
extract_required_kwarg(kwargs, self.subproject, default=False) if disabled: mlog.log('Check usable header', mlog.bold(hname, True), 'skipped: feature', mlog.bold(feature), 'disabled') return False extra_args = functools.partial(self.determine_args, kwargs) deps, msg = self.determine_dependencies(kwargs) haz, cached = self.compiler.check_header(hname, prefix, self.environment, extra_args=extra_args, dependencies=deps) cached = mlog.blue('(cached)') if cached else '' if required and not haz: raise InterpreterException('{} header {!r} not usable'.format(self.compiler.get_display_language(), hname)) elif haz: h = mlog.green('YES') else: h = mlog.red('NO') mlog.log('Check usable header', mlog.bold(hname, True), msg, h, cached) return haz @FeatureNewKwargs('compiler.has_header', '0.50.0', ['required']) @permittedKwargs(header_permitted_kwargs) def has_header_method(self, args, kwargs): if len(args) != 1: raise InterpreterException('has_header method takes exactly one argument.') check_stringlist(args) hname = args[0] prefix = kwargs.get('prefix', '') if not isinstance(prefix, str): raise InterpreterException('Prefix argument of has_header must be a string.') disabled, required, feature = extract_required_kwarg(kwargs, self.subproject, default=False) if disabled: mlog.log('Has header', mlog.bold(hname, True), 'skipped: feature', mlog.bold(feature), 'disabled') return False extra_args = functools.partial(self.determine_args, kwargs) deps, msg = self.determine_dependencies(kwargs) haz, cached = self.compiler.has_header(hname, prefix, self.environment, extra_args=extra_args, dependencies=deps) cached = mlog.blue('(cached)') if cached else '' if required and not haz: raise InterpreterException('{} header {!r} not found'.format(self.compiler.get_display_language(), hname)) elif haz: h = mlog.green('YES') else: h = mlog.red('NO') mlog.log('Has header', mlog.bold(hname, True), msg, h, cached) return haz @FeatureNewKwargs('compiler.has_header_symbol', '0.50.0', ['required']) @permittedKwargs(header_permitted_kwargs) def has_header_symbol_method(self, args, kwargs): if len(args) != 2: raise InterpreterException('has_header_symbol method takes exactly two arguments.') check_stringlist(args) hname, symbol = args prefix = kwargs.get('prefix', '') if not isinstance(prefix, str): raise InterpreterException('Prefix argument of has_header_symbol must be a string.') disabled, required, feature = extract_required_kwarg(kwargs, self.subproject, default=False) if disabled: mlog.log('Header <{0}> has symbol'.format(hname), mlog.bold(symbol, True), 'skipped: feature', mlog.bold(feature), 'disabled') return False extra_args = functools.partial(self.determine_args, kwargs) deps, msg = self.determine_dependencies(kwargs) haz, cached = self.compiler.has_header_symbol(hname, symbol, prefix, self.environment, extra_args=extra_args, dependencies=deps) if required and not haz: raise InterpreterException('{} symbol {} not found in header {}'.format(self.compiler.get_display_language(), symbol, hname)) elif haz: h = mlog.green('YES') else: h = mlog.red('NO') cached = mlog.blue('(cached)') if cached else '' mlog.log('Header <{0}> has symbol'.format(hname), mlog.bold(symbol, True), msg, h, cached) return haz def notfound_library(self, libname): lib = dependencies.ExternalLibrary(libname, None, self.environment, self.compiler.language, silent=True) return ExternalLibraryHolder(lib, self.subproject) @FeatureNewKwargs('compiler.find_library', '0.51.0', ['static']) @FeatureNewKwargs('compiler.find_library', '0.50.0', ['has_headers']) 
@FeatureNewKwargs('compiler.find_library', '0.49.0', ['disabler']) @disablerIfNotFound @permittedKwargs(find_library_permitted_kwargs) def find_library_method(self, args, kwargs): # TODO add dependencies support? if len(args) != 1: raise InterpreterException('find_library method takes one argument.') libname = args[0] if not isinstance(libname, str): raise InterpreterException('Library name not a string.') disabled, required, feature = extract_required_kwarg(kwargs, self.subproject) if disabled: mlog.log('Library', mlog.bold(libname), 'skipped: feature', mlog.bold(feature), 'disabled') return self.notfound_library(libname) has_header_kwargs = {k[7:]: v for k, v in kwargs.items() if k.startswith('header_')} has_header_kwargs['required'] = required headers = mesonlib.stringlistify(kwargs.get('has_headers', [])) for h in headers: if not self.has_header_method([h], has_header_kwargs): return self.notfound_library(libname) search_dirs = mesonlib.stringlistify(kwargs.get('dirs', [])) search_dirs = [Path(d).expanduser() for d in search_dirs] for d in search_dirs: if mesonlib.is_windows() and d.root.startswith('\\'): # a Unix-path starting with `/` that is not absolute on Windows. # discard without failing for end-user ease of cross-platform directory arrays continue if not d.is_absolute(): raise InvalidCode('Search directory {} is not an absolute path.'.format(d)) search_dirs = list(map(str, search_dirs)) libtype = mesonlib.LibType.PREFER_SHARED if 'static' in kwargs: if not isinstance(kwargs['static'], bool): raise InterpreterException('static must be a boolean') libtype = mesonlib.LibType.STATIC if kwargs['static'] else mesonlib.LibType.SHARED linkargs = self.compiler.find_library(libname, self.environment, search_dirs, libtype) if required and not linkargs: raise InterpreterException( '{} library {!r} not found'.format(self.compiler.get_display_language(), libname)) lib = dependencies.ExternalLibrary(libname, linkargs, self.environment, self.compiler.language) return ExternalLibraryHolder(lib, self.subproject) @permittedKwargs({}) def has_argument_method(self, args, kwargs): args = mesonlib.stringlistify(args) if len(args) != 1: raise InterpreterException('has_argument takes exactly one argument.') return self.has_multi_arguments_method(args, kwargs) @permittedKwargs({}) def has_multi_arguments_method(self, args, kwargs): args = mesonlib.stringlistify(args) result, cached = self.compiler.has_multi_arguments(args, self.environment) if result: h = mlog.green('YES') else: h = mlog.red('NO') cached = mlog.blue('(cached)') if cached else '' mlog.log( 'Compiler for {} supports arguments {}:'.format( self.compiler.get_display_language(), ' '.join(args)), h, cached) return result @FeatureNew('compiler.get_supported_arguments', '0.43.0') @permittedKwargs({}) def get_supported_arguments_method(self, args, kwargs): args = mesonlib.stringlistify(args) supported_args = [] for arg in args: if self.has_argument_method(arg, kwargs): supported_args.append(arg) return supported_args @permittedKwargs({}) def first_supported_argument_method(self, args, kwargs): for i in mesonlib.stringlistify(args): if self.has_argument_method(i, kwargs): mlog.log('First supported argument:', mlog.bold(i)) return [i] mlog.log('First supported argument:', mlog.red('None')) return [] @FeatureNew('compiler.has_link_argument', '0.46.0') @permittedKwargs({}) def has_link_argument_method(self, args, kwargs): args = mesonlib.stringlistify(args) if len(args) != 1: raise InterpreterException('has_link_argument takes exactly one 
argument.') return self.has_multi_link_arguments_method(args, kwargs) @FeatureNew('compiler.has_multi_link_argument', '0.46.0') @permittedKwargs({}) def has_multi_link_arguments_method(self, args, kwargs): args = mesonlib.stringlistify(args) result, cached = self.compiler.has_multi_link_arguments(args, self.environment) cached = mlog.blue('(cached)') if cached else '' if result: h = mlog.green('YES') else: h = mlog.red('NO') mlog.log( 'Compiler for {} supports link arguments {}:'.format( self.compiler.get_display_language(), ' '.join(args)), h, cached) return result @FeatureNew('compiler.get_supported_link_arguments_method', '0.46.0') @permittedKwargs({}) def get_supported_link_arguments_method(self, args, kwargs): args = mesonlib.stringlistify(args) supported_args = [] for arg in args: if self.has_link_argument_method(arg, kwargs): supported_args.append(arg) return supported_args @FeatureNew('compiler.first_supported_link_argument_method', '0.46.0') @permittedKwargs({}) def first_supported_link_argument_method(self, args, kwargs): for i in mesonlib.stringlistify(args): if self.has_link_argument_method(i, kwargs): mlog.log('First supported link argument:', mlog.bold(i)) return [i] mlog.log('First supported link argument:', mlog.red('None')) return [] @FeatureNew('compiler.has_function_attribute', '0.48.0') @permittedKwargs({}) def has_func_attribute_method(self, args, kwargs): args = mesonlib.stringlistify(args) if len(args) != 1: raise InterpreterException('has_func_attribute takes exactly one argument.') result, cached = self.compiler.has_func_attribute(args[0], self.environment) cached = mlog.blue('(cached)') if cached else '' h = mlog.green('YES') if result else mlog.red('NO') mlog.log('Compiler for {} supports function attribute {}:'.format(self.compiler.get_display_language(), args[0]), h, cached) return result @FeatureNew('compiler.get_supported_function_attributes', '0.48.0') @permittedKwargs({}) def get_supported_function_attributes_method(self, args, kwargs): args = mesonlib.stringlistify(args) return [a for a in args if self.has_func_attribute_method(a, kwargs)] @FeatureNew('compiler.get_argument_syntax_method', '0.49.0') @noPosargs @noKwargs def get_argument_syntax_method(self, args, kwargs): return self.compiler.get_argument_syntax() ModuleState = namedtuple('ModuleState', [ 'source_root', 'build_to_src', 'subproject', 'subdir', 'current_lineno', 'environment', 'project_name', 'project_version', 'backend', 'targets', 'data', 'headers', 'man', 'global_args', 'project_args', 'build_machine', 'host_machine', 'target_machine', 'current_node']) class ModuleHolder(InterpreterObject, ObjectHolder): def __init__(self, modname, module, interpreter): InterpreterObject.__init__(self) ObjectHolder.__init__(self, module) self.modname = modname self.interpreter = interpreter def method_call(self, method_name, args, kwargs): try: fn = getattr(self.held_object, method_name) except AttributeError: raise InvalidArguments('Module %s does not have method %s.' % (self.modname, method_name)) if method_name.startswith('_'): raise InvalidArguments('Function {!r} in module {!r} is private.'.format(method_name, self.modname)) if not getattr(fn, 'no-args-flattening', False): args = flatten(args) # This is not 100% reliable but we can't use hash() # because the Build object contains dicts and lists. 
num_targets = len(self.interpreter.build.targets) state = ModuleState( source_root = self.interpreter.environment.get_source_dir(), build_to_src=mesonlib.relpath(self.interpreter.environment.get_source_dir(), self.interpreter.environment.get_build_dir()), subproject=self.interpreter.subproject, subdir=self.interpreter.subdir, current_lineno=self.interpreter.current_lineno, environment=self.interpreter.environment, project_name=self.interpreter.build.project_name, project_version=self.interpreter.build.dep_manifest[self.interpreter.active_projectname], # The backend object is under-used right now, but we will need it: # https://github.com/mesonbuild/meson/issues/1419 backend=self.interpreter.backend, targets=self.interpreter.build.targets, data=self.interpreter.build.data, headers=self.interpreter.build.get_headers(), man=self.interpreter.build.get_man(), #global_args_for_build = self.interpreter.build.global_args.build, global_args = self.interpreter.build.global_args.host, #project_args_for_build = self.interpreter.build.projects_args.build.get(self.interpreter.subproject, {}), project_args = self.interpreter.build.projects_args.host.get(self.interpreter.subproject, {}), build_machine=self.interpreter.builtin['build_machine'].held_object, host_machine=self.interpreter.builtin['host_machine'].held_object, target_machine=self.interpreter.builtin['target_machine'].held_object, current_node=self.current_node ) if self.held_object.is_snippet(method_name): value = fn(self.interpreter, state, args, kwargs) return self.interpreter.holderify(value) else: value = fn(state, args, kwargs) if num_targets != len(self.interpreter.build.targets): raise InterpreterException('Extension module altered internal state illegally.') return self.interpreter.module_method_callback(value) class MesonMain(InterpreterObject): def __init__(self, build, interpreter): InterpreterObject.__init__(self) self.build = build self.interpreter = interpreter self._found_source_scripts = {} self.methods.update({'get_compiler': self.get_compiler_method, 'is_cross_build': self.is_cross_build_method, 'has_exe_wrapper': self.has_exe_wrapper_method, 'is_unity': self.is_unity_method, 'is_subproject': self.is_subproject_method, 'current_source_dir': self.current_source_dir_method, 'current_build_dir': self.current_build_dir_method, 'source_root': self.source_root_method, 'build_root': self.build_root_method, 'add_install_script': self.add_install_script_method, 'add_postconf_script': self.add_postconf_script_method, 'add_dist_script': self.add_dist_script_method, 'install_dependency_manifest': self.install_dependency_manifest_method, 'override_find_program': self.override_find_program_method, 'project_version': self.project_version_method, 'project_license': self.project_license_method, 'version': self.version_method, 'project_name': self.project_name_method, 'get_cross_property': self.get_cross_property_method, 'backend': self.backend_method, }) def _find_source_script(self, name, args): # Prefer scripts in the current source directory search_dir = os.path.join(self.interpreter.environment.source_dir, self.interpreter.subdir) key = (name, search_dir) if key in self._found_source_scripts: found = self._found_source_scripts[key] else: found = dependencies.ExternalProgram(name, search_dir=search_dir) if found.found(): self._found_source_scripts[key] = found else: m = 'Script or command {!r} not found or not executable' raise InterpreterException(m.format(name)) return build.RunScript(found.get_command(), args) @permittedKwargs({}) def 
add_install_script_method(self, args, kwargs): if len(args) < 1: raise InterpreterException('add_install_script takes one or more arguments') check_stringlist(args, 'add_install_script args must be strings') script = self._find_source_script(args[0], args[1:]) self.build.install_scripts.append(script) @permittedKwargs({}) def add_postconf_script_method(self, args, kwargs): if len(args) < 1: raise InterpreterException('add_postconf_script takes one or more arguments') check_stringlist(args, 'add_postconf_script arguments must be strings') script = self._find_source_script(args[0], args[1:]) self.build.postconf_scripts.append(script) @permittedKwargs({}) def add_dist_script_method(self, args, kwargs): if len(args) < 1: raise InterpreterException('add_dist_script takes one or more arguments') if len(args) > 1: FeatureNew('Calling "add_dist_script" with multiple arguments', '0.49.0').use(self.interpreter.subproject) check_stringlist(args, 'add_dist_script argument must be a string') if self.interpreter.subproject != '': raise InterpreterException('add_dist_script may not be used in a subproject.') script = self._find_source_script(args[0], args[1:]) self.build.dist_scripts.append(script) @noPosargs @permittedKwargs({}) def current_source_dir_method(self, args, kwargs): src = self.interpreter.environment.source_dir sub = self.interpreter.subdir if sub == '': return src return os.path.join(src, sub) @noPosargs @permittedKwargs({}) def current_build_dir_method(self, args, kwargs): src = self.interpreter.environment.build_dir sub = self.interpreter.subdir if sub == '': return src return os.path.join(src, sub) @noPosargs @permittedKwargs({}) def backend_method(self, args, kwargs): return self.interpreter.backend.name @noPosargs @permittedKwargs({}) def source_root_method(self, args, kwargs): return self.interpreter.environment.source_dir @noPosargs @permittedKwargs({}) def build_root_method(self, args, kwargs): return self.interpreter.environment.build_dir @noPosargs @permittedKwargs({}) def has_exe_wrapper_method(self, args, kwargs): if self.is_cross_build_method(None, None) and \ self.build.environment.need_exe_wrapper(): if self.build.environment.exe_wrapper is None: return False # We return True when exe_wrap is defined, when it's not needed, and # when we're compiling natively. The last two are semantically confusing. # Need to revisit this. return True @noPosargs @permittedKwargs({}) def is_cross_build_method(self, args, kwargs): return self.build.environment.is_cross_build() @permittedKwargs({'native'}) def get_compiler_method(self, args, kwargs): if len(args) != 1: raise InterpreterException('get_compiler_method must have one and only one argument.') cname = args[0] for_machine = Interpreter.machine_from_native_kwarg(kwargs) clist = self.interpreter.coredata.compilers[for_machine] if cname in clist: return CompilerHolder(clist[cname], self.build.environment, self.interpreter.subproject) raise InterpreterException('Tried to access compiler for unspecified language "%s".' 
% cname) @noPosargs @permittedKwargs({}) def is_unity_method(self, args, kwargs): optval = self.interpreter.environment.coredata.get_builtin_option('unity') if optval == 'on' or (optval == 'subprojects' and self.interpreter.is_subproject()): return True return False @noPosargs @permittedKwargs({}) def is_subproject_method(self, args, kwargs): return self.interpreter.is_subproject() @permittedKwargs({}) def install_dependency_manifest_method(self, args, kwargs): if len(args) != 1: raise InterpreterException('Must specify manifest install file name') if not isinstance(args[0], str): raise InterpreterException('Argument must be a string.') self.build.dep_manifest_name = args[0] @FeatureNew('meson.override_find_program', '0.46.0') @permittedKwargs({}) def override_find_program_method(self, args, kwargs): if len(args) != 2: raise InterpreterException('Override needs two arguments') name, exe = args if not isinstance(name, str): raise InterpreterException('First argument must be a string') if hasattr(exe, 'held_object'): exe = exe.held_object if isinstance(exe, mesonlib.File): abspath = exe.absolute_path(self.interpreter.environment.source_dir, self.interpreter.environment.build_dir) if not os.path.exists(abspath): raise InterpreterException('Tried to override %s with a file that does not exist.' % name) exe = OverrideProgram(abspath) if not isinstance(exe, (dependencies.ExternalProgram, build.Executable)): raise InterpreterException('Second argument must be an external program or executable.') self.interpreter.add_find_program_override(name, exe) @noPosargs @permittedKwargs({}) def project_version_method(self, args, kwargs): return self.build.dep_manifest[self.interpreter.active_projectname]['version'] @FeatureNew('meson.project_license()', '0.45.0') @noPosargs @permittedKwargs({}) def project_license_method(self, args, kwargs): return self.build.dep_manifest[self.interpreter.active_projectname]['license'] @noPosargs @permittedKwargs({}) def version_method(self, args, kwargs): return coredata.version @noPosargs @permittedKwargs({}) def project_name_method(self, args, kwargs): return self.interpreter.active_projectname @noArgsFlattening @permittedKwargs({}) def get_cross_property_method(self, args, kwargs): if len(args) < 1 or len(args) > 2: raise InterpreterException('Must have one or two arguments.') propname = args[0] if not isinstance(propname, str): raise InterpreterException('Property name must be string.') try: props = self.interpreter.environment.properties.host return props[propname] except Exception: if len(args) == 2: return args[1] raise InterpreterException('Unknown cross property: %s.' 
% propname) known_library_kwargs = ( build.known_shlib_kwargs | build.known_stlib_kwargs ) known_build_target_kwargs = ( known_library_kwargs | build.known_exe_kwargs | build.known_jar_kwargs | {'target_type'} ) _base_test_args = {'args', 'depends', 'env', 'should_fail', 'timeout', 'workdir', 'suite', 'priority', 'protocol'} permitted_kwargs = {'add_global_arguments': {'language', 'native'}, 'add_global_link_arguments': {'language', 'native'}, 'add_languages': {'required'}, 'add_project_link_arguments': {'language', 'native'}, 'add_project_arguments': {'language', 'native'}, 'add_test_setup': {'exe_wrapper', 'gdb', 'timeout_multiplier', 'env', 'is_default'}, 'benchmark': _base_test_args, 'build_target': known_build_target_kwargs, 'configure_file': {'input', 'output', 'configuration', 'command', 'copy', 'depfile', 'install_dir', 'install_mode', 'capture', 'install', 'format', 'output_format', 'encoding'}, 'custom_target': {'input', 'output', 'command', 'install', 'install_dir', 'install_mode', 'build_always', 'capture', 'depends', 'depend_files', 'depfile', 'build_by_default', 'build_always_stale', 'console'}, 'dependency': {'default_options', 'fallback', 'language', 'main', 'method', 'modules', 'cmake_module_path', 'optional_modules', 'native', 'not_found_message', 'required', 'static', 'version', 'private_headers', 'cmake_args', 'include_type', }, 'declare_dependency': {'include_directories', 'link_with', 'sources', 'dependencies', 'compile_args', 'link_args', 'link_whole', 'version', }, 'executable': build.known_exe_kwargs, 'find_program': {'required', 'native', 'version'}, 'generator': {'arguments', 'output', 'depends', 'depfile', 'capture', 'preserve_path_from'}, 'include_directories': {'is_system'}, 'install_data': {'install_dir', 'install_mode', 'rename', 'sources'}, 'install_headers': {'install_dir', 'install_mode', 'subdir'}, 'install_man': {'install_dir', 'install_mode'}, 'install_subdir': {'exclude_files', 'exclude_directories', 'install_dir', 'install_mode', 'strip_directory'}, 'jar': build.known_jar_kwargs, 'project': {'version', 'meson_version', 'default_options', 'license', 'subproject_dir'}, 'run_command': {'check', 'capture', 'env'}, 'run_target': {'command', 'depends'}, 'shared_library': build.known_shlib_kwargs, 'shared_module': build.known_shmod_kwargs, 'static_library': build.known_stlib_kwargs, 'both_libraries': known_library_kwargs, 'library': known_library_kwargs, 'subdir': {'if_found'}, 'subproject': {'version', 'default_options', 'required'}, 'test': set.union(_base_test_args, {'is_parallel'}), 'vcs_tag': {'input', 'output', 'fallback', 'command', 'replace_string'}, } class Interpreter(InterpreterBase): def __init__(self, build, backend=None, subproject='', subdir='', subproject_dir='subprojects', modules = None, default_project_options=None, mock=False, ast=None): super().__init__(build.environment.get_source_dir(), subdir) self.an_unpicklable_object = mesonlib.an_unpicklable_object self.build = build self.environment = build.environment self.coredata = self.environment.get_coredata() self.backend = backend self.subproject = subproject if modules is None: self.modules = {} else: self.modules = modules # Subproject directory is usually the name of the subproject, but can # be different for dependencies provided by wrap files. 
self.subproject_directory_name = subdir.split(os.path.sep)[-1] self.subproject_dir = subproject_dir self.option_file = os.path.join(self.source_root, self.subdir, 'meson_options.txt') if not mock and ast is None: self.load_root_meson_file() self.sanity_check_ast() elif ast is not None: self.ast = ast self.sanity_check_ast() self.builtin.update({'meson': MesonMain(build, self)}) self.generators = [] self.visited_subdirs = {} self.project_args_frozen = False self.global_args_frozen = False # implies self.project_args_frozen self.subprojects = {} self.subproject_stack = [] self.configure_file_outputs = {} # Passed from the outside, only used in subprojects. if default_project_options: self.default_project_options = default_project_options.copy() else: self.default_project_options = {} self.project_default_options = {} self.build_func_dict() # build_def_files needs to be defined before parse_project is called self.build_def_files = [os.path.join(self.subdir, environment.build_filename)] if not mock: self.parse_project() # Re-initialize machine descriptions. We can do a better job now because we # have the compilers needed to gain more knowledge, so wipe out old # inference and start over. machines = self.build.environment.machines.miss_defaulting() machines.build = environment.detect_machine_info(self.coredata.compilers.build) self.build.environment.machines = machines.default_missing() assert self.build.environment.machines.build.cpu is not None assert self.build.environment.machines.host.cpu is not None assert self.build.environment.machines.target.cpu is not None self.builtin['build_machine'] = \ MachineHolder(self.build.environment.machines.build) self.builtin['host_machine'] = \ MachineHolder(self.build.environment.machines.host) self.builtin['target_machine'] = \ MachineHolder(self.build.environment.machines.target) def get_non_matching_default_options(self): env = self.environment for def_opt_name, def_opt_value in self.project_default_options.items(): for opts in env.coredata.get_all_options(): cur_opt_value = opts.get(def_opt_name) if cur_opt_value is not None: def_opt_value = env.coredata.validate_option_value(def_opt_name, def_opt_value) if def_opt_value != cur_opt_value.value: yield (def_opt_name, def_opt_value, cur_opt_value) def build_func_dict(self): self.funcs.update({'add_global_arguments': self.func_add_global_arguments, 'add_project_arguments': self.func_add_project_arguments, 'add_global_link_arguments': self.func_add_global_link_arguments, 'add_project_link_arguments': self.func_add_project_link_arguments, 'add_test_setup': self.func_add_test_setup, 'add_languages': self.func_add_languages, 'alias_target': self.func_alias_target, 'assert': self.func_assert, 'benchmark': self.func_benchmark, 'build_target': self.func_build_target, 'configuration_data': self.func_configuration_data, 'configure_file': self.func_configure_file, 'custom_target': self.func_custom_target, 'declare_dependency': self.func_declare_dependency, 'dependency': self.func_dependency, 'disabler': self.func_disabler, 'environment': self.func_environment, 'error': self.func_error, 'executable': self.func_executable, 'generator': self.func_generator, 'gettext': self.func_gettext, 'get_option': self.func_get_option, 'get_variable': self.func_get_variable, 'files': self.func_files, 'find_library': self.func_find_library, 'find_program': self.func_find_program, 'include_directories': self.func_include_directories, 'import': self.func_import, 'install_data': self.func_install_data, 'install_headers': 
self.func_install_headers, 'install_man': self.func_install_man, 'install_subdir': self.func_install_subdir, 'is_disabler': self.func_is_disabler, 'is_variable': self.func_is_variable, 'jar': self.func_jar, 'join_paths': self.func_join_paths, 'library': self.func_library, 'message': self.func_message, 'warning': self.func_warning, 'option': self.func_option, 'project': self.func_project, 'run_target': self.func_run_target, 'run_command': self.func_run_command, 'set_variable': self.func_set_variable, 'subdir': self.func_subdir, 'subdir_done': self.func_subdir_done, 'subproject': self.func_subproject, 'shared_library': self.func_shared_lib, 'shared_module': self.func_shared_module, 'static_library': self.func_static_lib, 'both_libraries': self.func_both_lib, 'test': self.func_test, 'vcs_tag': self.func_vcs_tag }) if 'MESON_UNIT_TEST' in os.environ: self.funcs.update({'exception': self.func_exception}) def holderify(self, item): if isinstance(item, list): return [self.holderify(x) for x in item] if isinstance(item, dict): return {k: self.holderify(v) for k, v in item.items()} if isinstance(item, build.CustomTarget): return CustomTargetHolder(item, self) elif isinstance(item, (int, str, bool)) or item is None: return item elif isinstance(item, build.Executable): return ExecutableHolder(item, self) elif isinstance(item, build.GeneratedList): return GeneratedListHolder(item) elif isinstance(item, build.RunTarget): raise RuntimeError('This is not a pipe.') elif isinstance(item, build.RunScript): raise RuntimeError('Do not do this.') elif isinstance(item, build.Data): return DataHolder(item) elif isinstance(item, dependencies.Dependency): return DependencyHolder(item, self.subproject) elif isinstance(item, dependencies.ExternalProgram): return ExternalProgramHolder(item) elif hasattr(item, 'held_object'): return item else: raise InterpreterException('Module returned a value of unknown type.') def process_new_values(self, invalues): invalues = listify(invalues) for v in invalues: if isinstance(v, (RunTargetHolder, CustomTargetHolder, BuildTargetHolder)): v = v.held_object if isinstance(v, (build.BuildTarget, build.CustomTarget, build.RunTarget)): self.add_target(v.name, v) elif isinstance(v, list): self.module_method_callback(v) elif isinstance(v, build.GeneratedList): pass elif isinstance(v, build.RunScript): self.build.install_scripts.append(v) elif isinstance(v, build.Data): self.build.data.append(v) elif isinstance(v, dependencies.ExternalProgram): return ExternalProgramHolder(v) elif isinstance(v, dependencies.InternalDependency): # FIXME: This is special cased and not ideal: # The first source is our new VapiTarget, the rest are deps self.process_new_values(v.sources[0]) elif hasattr(v, 'held_object'): pass elif isinstance(v, (int, str, bool)): pass else: raise InterpreterException('Module returned a value of unknown type.') def module_method_callback(self, return_object): if not isinstance(return_object, ModuleReturnValue): raise InterpreterException('Bug in module, it returned an invalid object') invalues = return_object.new_objects self.process_new_values(invalues) return self.holderify(return_object.return_value) def get_build_def_files(self): return self.build_def_files def get_variables(self): return self.variables def check_stdlibs(self): for for_machine in MachineChoice: props = self.build.environment.properties[for_machine] for l in self.coredata.compilers[for_machine].keys(): try: di = mesonlib.stringlistify(props.get_stdlib(l)) if len(di) != 2: raise InterpreterException('Stdlib 
definition for %s should have exactly two elements.' % l) projname, depname = di subproj = self.do_subproject(projname, 'meson', {}) self.build.stdlibs.host[l] = subproj.get_variable_method([depname], {}) except KeyError: pass except InvalidArguments: pass @stringArgs @noKwargs def func_import(self, node, args, kwargs): if len(args) != 1: raise InvalidCode('Import takes one argument.') modname = args[0] if modname.startswith('unstable-'): plainname = modname.split('-', 1)[1] mlog.warning('Module %s has no backwards or forwards compatibility and might not exist in future releases.' % modname, location=node) modname = 'unstable_' + plainname if modname not in self.modules: try: module = importlib.import_module('mesonbuild.modules.' + modname) except ImportError: raise InvalidArguments('Module "%s" does not exist' % (modname, )) self.modules[modname] = module.initialize(self) return ModuleHolder(modname, self.modules[modname], self) @stringArgs @noKwargs def func_files(self, node, args, kwargs): return [mesonlib.File.from_source_file(self.environment.source_dir, self.subdir, fname) for fname in args] @FeatureNewKwargs('declare_dependency', '0.46.0', ['link_whole']) @permittedKwargs(permitted_kwargs['declare_dependency']) @noPosargs def func_declare_dependency(self, node, args, kwargs): version = kwargs.get('version', self.project_version) if not isinstance(version, str): raise InterpreterException('Version must be a string.') incs = self.extract_incdirs(kwargs) libs = extract_as_list(kwargs, 'link_with', unholder=True) libs_whole = extract_as_list(kwargs, 'link_whole', unholder=True) sources = extract_as_list(kwargs, 'sources') sources = listify(self.source_strings_to_files(sources), unholder=True) deps = extract_as_list(kwargs, 'dependencies', unholder=True) compile_args = mesonlib.stringlistify(kwargs.get('compile_args', [])) link_args = mesonlib.stringlistify(kwargs.get('link_args', [])) final_deps = [] for d in deps: try: d = d.held_object except Exception: pass if not isinstance(d, (dependencies.Dependency, dependencies.ExternalLibrary, dependencies.InternalDependency)): raise InterpreterException('Dependencies must be external deps') final_deps.append(d) for l in libs: if isinstance(l, dependencies.Dependency): raise InterpreterException('''Entries in "link_with" may only be self-built targets, external dependencies (including libraries) must go to "dependencies".''') dep = dependencies.InternalDependency(version, incs, compile_args, link_args, libs, libs_whole, sources, final_deps) return DependencyHolder(dep, self.subproject) @noKwargs def func_assert(self, node, args, kwargs): if len(args) != 2: raise InterpreterException('Assert takes exactly two arguments') value, message = args if not isinstance(value, bool): raise InterpreterException('Assert value not bool.') if not isinstance(message, str): raise InterpreterException('Assert message not a string.') if not value: raise InterpreterException('Assert failed: ' + message) def validate_arguments(self, args, argcount, arg_types): if argcount is not None: if argcount != len(args): raise InvalidArguments('Expected %d arguments, got %d.' 
% (argcount, len(args))) for actual, wanted in zip(args, arg_types): if wanted is not None: if not isinstance(actual, wanted): raise InvalidArguments('Incorrect argument type.') @FeatureNewKwargs('run_command', '0.50.0', ['env']) @FeatureNewKwargs('run_command', '0.47.0', ['check', 'capture']) @permittedKwargs(permitted_kwargs['run_command']) def func_run_command(self, node, args, kwargs): return self.run_command_impl(node, args, kwargs) def run_command_impl(self, node, args, kwargs, in_builddir=False): if len(args) < 1: raise InterpreterException('Not enough arguments') cmd, *cargs = args capture = kwargs.get('capture', True) srcdir = self.environment.get_source_dir() builddir = self.environment.get_build_dir() check = kwargs.get('check', False) if not isinstance(check, bool): raise InterpreterException('Check must be boolean.') env = self.unpack_env_kwarg(kwargs) m = 'must be a string, or the output of find_program(), files() '\ 'or configure_file(), or a compiler object; not {!r}' expanded_args = [] if isinstance(cmd, ExternalProgramHolder): cmd = cmd.held_object if isinstance(cmd, build.Executable): progname = node.args.arguments[0].value msg = 'Program {!r} was overridden with the compiled executable {!r}'\ ' and therefore cannot be used during configuration' raise InterpreterException(msg.format(progname, cmd.description())) if not cmd.found(): raise InterpreterException('command {!r} not found or not executable'.format(cmd)) elif isinstance(cmd, CompilerHolder): exelist = cmd.compiler.get_exelist() cmd = exelist[0] prog = ExternalProgram(cmd, silent=True) if not prog.found(): raise InterpreterException('Program {!r} not found ' 'or not executable'.format(cmd)) cmd = prog expanded_args = exelist[1:] else: if isinstance(cmd, mesonlib.File): cmd = cmd.absolute_path(srcdir, builddir) elif not isinstance(cmd, str): raise InterpreterException('First argument ' + m.format(cmd)) # Prefer scripts in the current source directory search_dir = os.path.join(srcdir, self.subdir) prog = ExternalProgram(cmd, silent=True, search_dir=search_dir) if not prog.found(): raise InterpreterException('Program or command {!r} not found ' 'or not executable'.format(cmd)) cmd = prog cmd_path = mesonlib.relpath(cmd.get_path(), start=srcdir) if not cmd_path.startswith('..'): # On Windows, program on a different drive than srcdir won't have # an expressible relative path; cmd_path will be absolute instead. if not os.path.isabs(cmd_path): if cmd_path not in self.build_def_files: self.build_def_files.append(cmd_path) for a in listify(cargs): if isinstance(a, str): expanded_args.append(a) elif isinstance(a, mesonlib.File): expanded_args.append(a.absolute_path(srcdir, builddir)) elif isinstance(a, ExternalProgramHolder): expanded_args.append(a.held_object.get_path()) else: raise InterpreterException('Arguments ' + m.format(a)) # If any file that was used as an argument to the command # changes, we must re-run the configuration step. for a in expanded_args: if not os.path.isabs(a): a = os.path.join(builddir if in_builddir else srcdir, self.subdir, a) if os.path.isfile(a): a = mesonlib.relpath(a, start=srcdir) if a not in self.build_def_files: self.build_def_files.append(a) return RunProcess(cmd, expanded_args, env, srcdir, builddir, self.subdir, self.environment.get_build_command() + ['introspect'], in_builddir=in_builddir, check=check, capture=capture) @stringArgs def func_gettext(self, nodes, args, kwargs): raise InterpreterException('Gettext() function has been moved to module i18n. 
Import it and use i18n.gettext() instead') def func_option(self, nodes, args, kwargs): raise InterpreterException('Tried to call option() in build description file. All options must be in the option file.') @FeatureNewKwargs('subproject', '0.38.0', ['default_options']) @permittedKwargs(permitted_kwargs['subproject']) @stringArgs def func_subproject(self, nodes, args, kwargs): if len(args) != 1: raise InterpreterException('Subproject takes exactly one argument') dirname = args[0] return self.do_subproject(dirname, 'meson', kwargs) def disabled_subproject(self, dirname): self.subprojects[dirname] = SubprojectHolder(None, self.subproject_dir, dirname) return self.subprojects[dirname] def do_subproject(self, dirname: str, method: str, kwargs): disabled, required, feature = extract_required_kwarg(kwargs, self.subproject) if disabled: mlog.log('Subproject', mlog.bold(dirname), ':', 'skipped: feature', mlog.bold(feature), 'disabled') return self.disabled_subproject(dirname) default_options = mesonlib.stringlistify(kwargs.get('default_options', [])) default_options = coredata.create_options_dict(default_options) if dirname == '': raise InterpreterException('Subproject dir name must not be empty.') if dirname[0] == '.': raise InterpreterException('Subproject dir name must not start with a period.') if '..' in dirname: raise InterpreterException('Subproject name must not contain a ".." path segment.') if os.path.isabs(dirname): raise InterpreterException('Subproject name must not be an absolute path.') if has_path_sep(dirname): mlog.warning('Subproject name has a path separator. This may cause unexpected behaviour.', location=self.current_node) if dirname in self.subproject_stack: fullstack = self.subproject_stack + [dirname] incpath = ' => '.join(fullstack) raise InvalidCode('Recursive include of subprojects: %s.' % incpath) if dirname in self.subprojects: subproject = self.subprojects[dirname] if required and not subproject.found(): raise InterpreterException('Subproject "%s/%s" required but not found.' % ( self.subproject_dir, dirname)) return subproject subproject_dir_abs = os.path.join(self.environment.get_source_dir(), self.subproject_dir) r = wrap.Resolver(subproject_dir_abs, self.coredata.get_builtin_option('wrap_mode')) try: resolved = r.resolve(dirname, method) except wrap.WrapException as e: subprojdir = os.path.join(self.subproject_dir, r.directory) if isinstance(e, wrap.WrapNotFoundException): # if the reason subproject execution failed was because # the directory doesn't exist, try to give some helpful # advice if it's a nested subproject that needs # promotion... 
self.print_nested_info(dirname) if not required: mlog.log(e) mlog.log('Subproject ', mlog.bold(subprojdir), 'is buildable:', mlog.red('NO'), '(disabling)') return self.disabled_subproject(dirname) raise e subdir = os.path.join(self.subproject_dir, resolved) subdir_abs = os.path.join(subproject_dir_abs, resolved) os.makedirs(os.path.join(self.build.environment.get_build_dir(), subdir), exist_ok=True) self.global_args_frozen = True mlog.log() with mlog.nested(): mlog.log('Executing subproject', mlog.bold(dirname), 'method', mlog.bold(method), '\n') try: if method == 'meson': return self._do_subproject_meson(dirname, subdir, default_options, kwargs) elif method == 'cmake': return self._do_subproject_cmake(dirname, subdir, subdir_abs, default_options, kwargs) else: raise InterpreterException('The method {} is invalid for the subproject {}'.format(method, dirname)) # Invalid code is always an error except InvalidCode: raise except Exception as e: if not required: with mlog.nested(): # Suppress the 'ERROR:' prefix because this exception is not # fatal and VS CI treat any logs with "ERROR:" as fatal. mlog.exception(e, prefix=mlog.yellow('Exception:')) mlog.log('\nSubproject', mlog.bold(dirname), 'is buildable:', mlog.red('NO'), '(disabling)') return self.disabled_subproject(dirname) raise e def _do_subproject_meson(self, dirname, subdir, default_options, kwargs, ast=None, build_def_files=None): with mlog.nested(): new_build = self.build.copy() subi = Interpreter(new_build, self.backend, dirname, subdir, self.subproject_dir, self.modules, default_options, ast=ast) subi.subprojects = self.subprojects subi.subproject_stack = self.subproject_stack + [dirname] current_active = self.active_projectname subi.run() mlog.log('Subproject', mlog.bold(dirname), 'finished.') mlog.log() if 'version' in kwargs: pv = subi.project_version wanted = kwargs['version'] if pv == 'undefined' or not mesonlib.version_compare_many(pv, wanted)[0]: raise InterpreterException('Subproject %s version is %s but %s required.' 
% (dirname, pv, wanted)) self.active_projectname = current_active self.subprojects.update(subi.subprojects) self.subprojects[dirname] = SubprojectHolder(subi, self.subproject_dir, dirname) # Duplicates are possible when subproject uses files from project root if build_def_files: self.build_def_files = list(set(self.build_def_files + build_def_files)) else: self.build_def_files = list(set(self.build_def_files + subi.build_def_files)) self.build.merge(subi.build) self.build.subprojects[dirname] = subi.project_version return self.subprojects[dirname] def _do_subproject_cmake(self, dirname, subdir, subdir_abs, default_options, kwargs): with mlog.nested(): new_build = self.build.copy() prefix = self.coredata.builtins['prefix'].value cmake_options = mesonlib.stringlistify(kwargs.get('cmake_options', [])) cm_int = CMakeInterpreter(new_build, subdir, subdir_abs, prefix, new_build.environment, self.backend) cm_int.initialise(cmake_options) cm_int.analyse() # Generate a meson ast and execute it with the normal do_subproject_meson ast = cm_int.pretend_to_be_meson() mlog.log() with mlog.nested(): mlog.log('Processing generated meson AST') # Debug print the generated meson file from .ast import AstIndentationGenerator, AstPrinter printer = AstPrinter() ast.accept(AstIndentationGenerator()) ast.accept(printer) printer.post_process() meson_filename = os.path.join(self.build.environment.get_build_dir(), subdir, 'meson.build') with open(meson_filename, "w") as f: f.write(printer.result) mlog.log('Build file:', meson_filename) mlog.cmd_ci_include(meson_filename) mlog.log() result = self._do_subproject_meson(dirname, subdir, default_options, kwargs, ast, cm_int.bs_files) result.cm_interpreter = cm_int mlog.log() return result def get_option_internal(self, optname): for opts in chain( [self.coredata.base_options, compilers.base_options, self.coredata.builtins], self.coredata.get_prefixed_options_per_machine(self.coredata.builtins_per_machine), self.coredata.get_prefixed_options_per_machine(self.coredata.compiler_options), ): v = opts.get(optname) if v is not None: return v raw_optname = optname if self.is_subproject(): optname = self.subproject + ':' + optname try: opt = self.coredata.user_options[optname] if opt.yielding and ':' in optname and raw_optname in self.coredata.user_options: popt = self.coredata.user_options[raw_optname] if type(opt) is type(popt): opt = popt else: # Get class name, then option type as a string opt_type = opt.__class__.__name__[4:][:-6].lower() popt_type = popt.__class__.__name__[4:][:-6].lower() # This is not a hard error to avoid dependency hell, the workaround # when this happens is to simply set the subproject's option directly. mlog.warning('Option {0!r} of type {1!r} in subproject {2!r} cannot yield ' 'to parent option of type {3!r}, ignoring parent value. ' 'Use -D{2}:{0}=value to set the value for this option manually' '.'.format(raw_optname, opt_type, self.subproject, popt_type), location=self.current_node) return opt except KeyError: pass raise InterpreterException('Tried to access unknown option "%s".' 
% optname) @stringArgs @noKwargs def func_get_option(self, nodes, args, kwargs): if len(args) != 1: raise InterpreterException('Argument required for get_option.') optname = args[0] if ':' in optname: raise InterpreterException('Having a colon in option name is forbidden, ' 'projects are not allowed to directly access ' 'options of other subprojects.') opt = self.get_option_internal(optname) if isinstance(opt, coredata.UserFeatureOption): return FeatureOptionHolder(self.environment, optname, opt) elif isinstance(opt, coredata.UserOption): return opt.value return opt @noKwargs def func_configuration_data(self, node, args, kwargs): if len(args) > 1: raise InterpreterException('configuration_data takes only one optional positional arguments') elif len(args) == 1: FeatureNew('configuration_data dictionary', '0.49.0').use(self.subproject) initial_values = args[0] if not isinstance(initial_values, dict): raise InterpreterException('configuration_data first argument must be a dictionary') else: initial_values = {} return ConfigurationDataHolder(self.subproject, initial_values) def set_backend(self): # The backend is already set when parsing subprojects if self.backend is not None: return backend = self.coredata.get_builtin_option('backend') from .backend import backends self.backend = backends.get_backend_from_name(backend, self.build) if self.backend is None: raise InterpreterException('Unknown backend "%s".' % backend) if backend != self.backend.name: if self.backend.name.startswith('vs'): mlog.log('Auto detected Visual Studio backend:', mlog.bold(self.backend.name)) self.coredata.set_builtin_option('backend', self.backend.name) # Only init backend options on first invocation otherwise it would # override values previously set from command line. if self.environment.first_invocation: self.coredata.init_backend_options(backend) options = {k: v for k, v in self.environment.cmd_line_options.items() if k.startswith('backend_')} self.coredata.set_options(options) @stringArgs @permittedKwargs(permitted_kwargs['project']) def func_project(self, node, args, kwargs): if len(args) < 1: raise InvalidArguments('Not enough arguments to project(). Needs at least the project name.') proj_name, *proj_langs = args if ':' in proj_name: raise InvalidArguments("Project name {!r} must not contain ':'".format(proj_name)) if 'meson_version' in kwargs: cv = coredata.version pv = kwargs['meson_version'] if not mesonlib.version_compare(cv, pv): raise InterpreterException('Meson version is %s but project requires %s' % (cv, pv)) if os.path.exists(self.option_file): oi = optinterpreter.OptionInterpreter(self.subproject) oi.process(self.option_file) self.coredata.merge_user_options(oi.options) # Do not set default_options on reconfigure otherwise it would override # values previously set from command line. That means that changing # default_options in a project will trigger a reconfigure but won't # have any effect. 
self.project_default_options = mesonlib.stringlistify(kwargs.get('default_options', [])) self.project_default_options = coredata.create_options_dict(self.project_default_options) if self.environment.first_invocation: default_options = self.project_default_options default_options.update(self.default_project_options) else: default_options = {} self.coredata.set_default_options(default_options, self.subproject, self.environment) if not self.is_subproject(): self.build.project_name = proj_name self.active_projectname = proj_name self.project_version = kwargs.get('version', 'undefined') if self.build.project_version is None: self.build.project_version = self.project_version proj_license = mesonlib.stringlistify(kwargs.get('license', 'unknown')) self.build.dep_manifest[proj_name] = {'version': self.project_version, 'license': proj_license} if self.subproject in self.build.projects: raise InvalidCode('Second call to project().') if not self.is_subproject() and 'subproject_dir' in kwargs: spdirname = kwargs['subproject_dir'] if not isinstance(spdirname, str): raise InterpreterException('Subproject_dir must be a string') if os.path.isabs(spdirname): raise InterpreterException('Subproject_dir must not be an absolute path.') if spdirname.startswith('.'): raise InterpreterException('Subproject_dir must not begin with a period.') if '..' in spdirname: raise InterpreterException('Subproject_dir must not contain a ".." segment.') self.subproject_dir = spdirname self.build.subproject_dir = self.subproject_dir mesonlib.project_meson_versions[self.subproject] = '' if 'meson_version' in kwargs: mesonlib.project_meson_versions[self.subproject] = kwargs['meson_version'] self.build.projects[self.subproject] = proj_name mlog.log('Project name:', mlog.bold(proj_name)) mlog.log('Project version:', mlog.bold(self.project_version)) self.add_languages(proj_langs, True) self.set_backend() if not self.is_subproject(): self.check_stdlibs() @permittedKwargs(permitted_kwargs['add_languages']) @stringArgs def func_add_languages(self, node, args, kwargs): disabled, required, feature = extract_required_kwarg(kwargs, self.subproject) if disabled: for lang in sorted(args, key=compilers.sort_clink): mlog.log('Compiler for language', mlog.bold(lang), 'skipped: feature', mlog.bold(feature), 'disabled') return False return self.add_languages(args, required) def get_message_string_arg(self, node): # reduce arguments again to avoid flattening posargs (posargs, _) = self.reduce_arguments(node.args) if len(posargs) != 1: raise InvalidArguments('Expected 1 argument, got %d' % len(posargs)) arg = posargs[0] if isinstance(arg, list): argstr = stringifyUserArguments(arg) elif isinstance(arg, dict): argstr = stringifyUserArguments(arg) elif isinstance(arg, str): argstr = arg elif isinstance(arg, int): argstr = str(arg) else: raise InvalidArguments('Function accepts only strings, integers, lists and lists thereof.') return argstr @noKwargs def func_message(self, node, args, kwargs): argstr = self.get_message_string_arg(node) self.message_impl(argstr) def message_impl(self, argstr): mlog.log(mlog.bold('Message:'), argstr) @FeatureNew('warning', '0.44.0') @noKwargs def func_warning(self, node, args, kwargs): argstr = self.get_message_string_arg(node) mlog.warning(argstr, location=node) @noKwargs def func_error(self, node, args, kwargs): self.validate_arguments(args, 1, [str]) raise InterpreterException('Problem encountered: ' + args[0]) @noKwargs def func_exception(self, node, args, kwargs): self.validate_arguments(args, 0, []) raise 
Exception() def add_languages(self, args: Sequence[str], required: bool) -> bool: success = self.add_languages_for(args, required, MachineChoice.BUILD) success &= self.add_languages_for(args, required, MachineChoice.HOST) if not self.coredata.is_cross_build(): self.coredata.copy_build_options_from_regular_ones() return success def add_languages_for(self, args, required, for_machine: MachineChoice): success = True for lang in sorted(args, key=compilers.sort_clink): lang = lang.lower() clist = self.coredata.compilers[for_machine] machine_name = for_machine.get_lower_case_name() if lang in clist: comp = clist[lang] else: try: comp = self.environment.detect_compiler_for(lang, for_machine) if comp is None: raise InvalidArguments('Tried to use unknown language "%s".' % lang) comp.sanity_check(self.environment.get_scratch_dir(), self.environment) except Exception: if not required: mlog.log('Compiler for language', mlog.bold(lang), 'for the', machine_name, 'machine not found.') success = False continue else: raise if for_machine == MachineChoice.HOST or self.environment.is_cross_build(): logger_fun = mlog.log else: logger_fun = mlog.debug logger_fun(comp.get_display_language(), 'compiler for the', machine_name, 'machine:', mlog.bold(' '.join(comp.get_exelist())), comp.get_version_string()) if comp.linker is not None: logger_fun(comp.get_display_language(), 'linker for the', machine_name, 'machine:', mlog.bold(' '.join(comp.linker.get_exelist())), comp.linker.id, comp.linker.version) self.build.ensure_static_linker(comp) langs = self.coredata.compilers[for_machine].keys() if 'vala' in langs: if 'c' not in langs: raise InterpreterException('Compiling Vala requires C. Add C to your project languages and rerun Meson.') return success def program_from_file_for(self, for_machine, prognames, silent): bins = self.environment.binaries[for_machine] for p in prognames: if hasattr(p, 'held_object'): p = p.held_object if isinstance(p, mesonlib.File): continue # Always points to a local (i.e. self generated) file. if not isinstance(p, str): raise InterpreterException('Executable name must be a string') prog = ExternalProgram.from_bin_list(bins, p) if prog.found(): return ExternalProgramHolder(prog) return None def program_from_system(self, args, silent=False): # Search for scripts relative to current subdir. # Do not cache found programs because find_program('foobar') # might give different results when run from different source dirs. 
source_dir = os.path.join(self.environment.get_source_dir(), self.subdir) for exename in args: if isinstance(exename, mesonlib.File): if exename.is_built: search_dir = os.path.join(self.environment.get_build_dir(), exename.subdir) else: search_dir = os.path.join(self.environment.get_source_dir(), exename.subdir) exename = exename.fname elif isinstance(exename, str): search_dir = source_dir else: raise InvalidArguments('find_program only accepts strings and ' 'files, not {!r}'.format(exename)) extprog = dependencies.ExternalProgram(exename, search_dir=search_dir, silent=silent) progobj = ExternalProgramHolder(extprog) if progobj.found(): return progobj def program_from_overrides(self, command_names, silent=False): for name in command_names: if not isinstance(name, str): continue if name in self.build.find_overrides: exe = self.build.find_overrides[name] if not silent: mlog.log('Program', mlog.bold(name), 'found:', mlog.green('YES'), '(overridden: %s)' % exe.description()) return ExternalProgramHolder(exe) return None def store_name_lookups(self, command_names): for name in command_names: if isinstance(name, str): self.build.searched_programs.add(name) def add_find_program_override(self, name, exe): if name in self.build.searched_programs: raise InterpreterException('Tried to override finding of executable "%s" which has already been found.' % name) if name in self.build.find_overrides: raise InterpreterException('Tried to override executable "%s" which has already been overridden.' % name) self.build.find_overrides[name] = exe # TODO update modules to always pass `for_machine`. It is bad-form to assume # the host machine. def find_program_impl(self, args, for_machine: MachineChoice = MachineChoice.HOST, required=True, silent=True, wanted=''): if not isinstance(args, list): args = [args] progobj = self.program_from_overrides(args, silent=silent) if progobj is None: progobj = self.program_from_file_for(for_machine, args, silent=silent) if progobj is None: progobj = self.program_from_system(args, silent=silent) if progobj is None and args[0].endswith('python3'): prog = dependencies.ExternalProgram('python3', mesonlib.python_command, silent=True) progobj = ExternalProgramHolder(prog) if required and (progobj is None or not progobj.found()): raise InvalidArguments('Program(s) {!r} not found or not executable'.format(args)) if progobj is None: return ExternalProgramHolder(dependencies.NonExistingExternalProgram()) # Only store successful lookups self.store_name_lookups(args) if wanted: version = progobj.get_version(self) is_found, not_found, found = mesonlib.version_compare_many(version, wanted) if not is_found: mlog.log('Program', mlog.bold(progobj.get_name()), 'found:', mlog.red('NO'), 'found {!r} but need:'.format(version), ', '.join(["'{}'".format(e) for e in not_found])) if required: m = 'Invalid version of program, need {!r} {!r} found {!r}.' 
raise InvalidArguments(m.format(progobj.get_name(), not_found, version)) return ExternalProgramHolder(dependencies.NonExistingExternalProgram()) return progobj @FeatureNewKwargs('find_program', '0.52.0', ['version']) @FeatureNewKwargs('find_program', '0.49.0', ['disabler']) @disablerIfNotFound @permittedKwargs(permitted_kwargs['find_program']) def func_find_program(self, node, args, kwargs): if not args: raise InterpreterException('No program name specified.') disabled, required, feature = extract_required_kwarg(kwargs, self.subproject) if disabled: mlog.log('Program', mlog.bold(' '.join(args)), 'skipped: feature', mlog.bold(feature), 'disabled') return ExternalProgramHolder(dependencies.NonExistingExternalProgram()) if not isinstance(required, bool): raise InvalidArguments('"required" argument must be a boolean.') wanted = mesonlib.stringlistify(kwargs.get('version', [])) for_machine = self.machine_from_native_kwarg(kwargs) return self.find_program_impl(args, for_machine, required=required, silent=False, wanted=wanted) def func_find_library(self, node, args, kwargs): raise InvalidCode('find_library() is removed, use meson.get_compiler(\'name\').find_library() instead.\n' 'Look here for documentation: http://mesonbuild.com/Reference-manual.html#compiler-object\n' 'Look here for example: http://mesonbuild.com/howtox.html#add-math-library-lm-portably\n' ) def _find_cached_dep(self, name, kwargs): # Check if we want this as a build-time / build machine or runt-time / # host machine dep. for_machine = self.machine_from_native_kwarg(kwargs) identifier = dependencies.get_dep_identifier(name, kwargs) cached_dep = self.coredata.deps[for_machine].get(identifier) if cached_dep: if not cached_dep.found(): mlog.log('Dependency', mlog.bold(name), 'found:', mlog.red('NO'), mlog.blue('(cached)')) return identifier, cached_dep # Verify the cached dep version match wanted_vers = kwargs.get('version', []) found_vers = cached_dep.get_version() if not wanted_vers or mesonlib.version_compare_many(found_vers, wanted_vers)[0]: info = [mlog.blue('(cached)')] if found_vers: info = [mlog.normal_cyan(found_vers), *info] mlog.log('Dependency', mlog.bold(name), 'found:', mlog.green('YES'), *info) return identifier, cached_dep return identifier, None @staticmethod def check_subproject_version(wanted, found): if wanted == 'undefined': return True if found == 'undefined' or not mesonlib.version_compare_many(found, wanted)[0]: return False return True def notfound_dependency(self): return DependencyHolder(NotFoundDependency(self.environment), self.subproject) def get_subproject_dep(self, display_name, dirname, varname, kwargs): dep = self.notfound_dependency() try: subproject = self.subprojects[dirname] if subproject.found(): dep = self.subprojects[dirname].get_variable_method([varname], {}) except InvalidArguments: pass if not isinstance(dep, DependencyHolder): raise InvalidCode('Fetched variable {!r} in the subproject {!r} is ' 'not a dependency object.'.format(varname, dirname)) required = kwargs.get('required', True) wanted = kwargs.get('version', 'undefined') subproj_path = os.path.join(self.subproject_dir, dirname) if not dep.found(): if required: raise DependencyException('Could not find dependency {} in subproject {}' ''.format(varname, dirname)) # If the dependency is not required, don't raise an exception mlog.log('Dependency', mlog.bold(display_name), 'from subproject', mlog.bold(subproj_path), 'found:', mlog.red('NO')) return dep found = dep.held_object.get_version() if not 
self.check_subproject_version(wanted, found):
            if required:
                raise DependencyException('Version {} of subproject dependency {} already '
                                          'cached, requested incompatible version {} for '
                                          'dep {}'.format(found, dirname, wanted, display_name))
            mlog.log('Subproject', mlog.bold(subproj_path), 'dependency',
                     mlog.bold(display_name), 'version is', mlog.normal_cyan(found),
                     'but', mlog.bold(wanted), 'is required.')
            return self.notfound_dependency()
        found = mlog.normal_cyan(found) if found else None
        mlog.log('Dependency', mlog.bold(display_name), 'from subproject',
                 mlog.bold(subproj_path), 'found:', mlog.green('YES'), found)
        return dep

    def _handle_featurenew_dependencies(self, name):
        'Do a feature check on dependencies used by this subproject'
        if name == 'mpi':
            FeatureNew('MPI Dependency', '0.42.0').use(self.subproject)
        elif name == 'pcap':
            FeatureNew('Pcap Dependency', '0.42.0').use(self.subproject)
        elif name == 'vulkan':
            FeatureNew('Vulkan Dependency', '0.42.0').use(self.subproject)
        elif name == 'libwmf':
            FeatureNew('LibWMF Dependency', '0.44.0').use(self.subproject)
        elif name == 'openmp':
            FeatureNew('OpenMP Dependency', '0.46.0').use(self.subproject)

    @FeatureNewKwargs('dependency', '0.52.0', ['include_type'])
    @FeatureNewKwargs('dependency', '0.50.0', ['not_found_message', 'cmake_module_path', 'cmake_args'])
    @FeatureNewKwargs('dependency', '0.49.0', ['disabler'])
    @FeatureNewKwargs('dependency', '0.40.0', ['method'])
    @FeatureNewKwargs('dependency', '0.38.0', ['default_options'])
    @disablerIfNotFound
    @permittedKwargs(permitted_kwargs['dependency'])
    def func_dependency(self, node, args, kwargs):
        self.validate_arguments(args, 1, [str])
        name = args[0]
        display_name = name if name else '(anonymous)'
        not_found_message = kwargs.get('not_found_message', '')
        if not isinstance(not_found_message, str):
            raise InvalidArguments('The not_found_message must be a string.')
        try:
            d = self.dependency_impl(name, display_name, kwargs)
        except Exception:
            if not_found_message:
                self.message_impl(not_found_message)
            raise
        if not d.found() and not_found_message:
            self.message_impl(not_found_message)
        return d

    def dependency_impl(self, name, display_name, kwargs):
        disabled, required, feature = extract_required_kwarg(kwargs, self.subproject)
        if disabled:
            mlog.log('Dependency', mlog.bold(display_name), 'skipped: feature',
                     mlog.bold(feature), 'disabled')
            return self.notfound_dependency()

        has_fallback = 'fallback' in kwargs
        if 'default_options' in kwargs and not has_fallback:
            mlog.warning('The "default_options" keyword argument does nothing without a "fallback" keyword argument.',
                         location=self.current_node)

        # writing just "dependency('')" is an error, because it can only fail
        if name == '' and required and not has_fallback:
            raise InvalidArguments('Dependency is both required and not-found')

        if '<' in name or '>' in name or '=' in name:
            raise InvalidArguments('Characters <, > and = are forbidden in dependency names. To specify '
                                   'version requirements use the \'version\' keyword argument instead.')

        identifier, cached_dep = self._find_cached_dep(name, kwargs)
        if cached_dep:
            if required and not cached_dep.found():
                m = 'Dependency {!r} was already checked and was not found'
                raise DependencyException(m.format(display_name))
            return DependencyHolder(cached_dep, self.subproject)

        # If the dependency has already been configured, possibly by
        # a higher level project, try to use it first.
        if has_fallback:
            dirname, varname = self.get_subproject_infos(kwargs)
            if dirname in self.subprojects:
                return self.get_subproject_dep(name, dirname, varname, kwargs)

        wrap_mode = self.coredata.get_builtin_option('wrap_mode')
        forcefallback = wrap_mode == WrapMode.forcefallback and has_fallback
        if name != '' and not forcefallback:
            self._handle_featurenew_dependencies(name)
            kwargs['required'] = required and not has_fallback
            dep = dependencies.find_external_dependency(name, self.environment, kwargs)
            kwargs['required'] = required
            # Only store found-deps in the cache
            # Never add fallback deps to self.coredata.deps since we
            # cannot cache them. They must always be evaluated else
            # we won't actually read all the build files.
            if dep.found():
                for_machine = self.machine_from_native_kwarg(kwargs)
                self.coredata.deps[for_machine].put(identifier, dep)
                return DependencyHolder(dep, self.subproject)

        if has_fallback:
            return self.dependency_fallback(display_name, kwargs)

        return self.notfound_dependency()

    @FeatureNew('disabler', '0.44.0')
    @noKwargs
    @noPosargs
    def func_disabler(self, node, args, kwargs):
        return Disabler()

    def print_nested_info(self, dependency_name):
        message = ['Dependency', mlog.bold(dependency_name), 'not found but it is available in a sub-subproject.\n' +
                   'To use it in the current project, promote it by going in the project source\n'
                   'root and issuing']
        sprojs = mesonlib.detect_subprojects('subprojects', self.source_root)
        if dependency_name not in sprojs:
            return
        found = sprojs[dependency_name]
        if len(found) > 1:
            message.append('one of the following commands:')
        else:
            message.append('the following command:')
        command_templ = '\nmeson wrap promote {}'
        for l in found:
            message.append(mlog.bold(command_templ.format(l[len(self.source_root) + 1:])))
        mlog.warning(*message, location=self.current_node)

    def get_subproject_infos(self, kwargs):
        fbinfo = kwargs['fallback']
        check_stringlist(fbinfo)
        if len(fbinfo) != 2:
            raise InterpreterException('Fallback info must have exactly two items.')
        return fbinfo

    def dependency_fallback(self, display_name, kwargs):
        if self.coredata.get_builtin_option('wrap_mode') == WrapMode.nofallback:
            mlog.log('Not looking for a fallback subproject for the dependency',
                     mlog.bold(display_name), 'because:\nUse of fallback '
                     'dependencies is disabled.')
            return self.notfound_dependency()
        elif self.coredata.get_builtin_option('wrap_mode') == WrapMode.forcefallback:
            mlog.log('Looking for a fallback subproject for the dependency',
                     mlog.bold(display_name), 'because:\nUse of fallback dependencies is forced.')
        else:
            mlog.log('Looking for a fallback subproject for the dependency',
                     mlog.bold(display_name))
        dirname, varname = self.get_subproject_infos(kwargs)
        sp_kwargs = {
            'default_options': kwargs.get('default_options', []),
            'required': kwargs.get('required', True),
        }
        self.do_subproject(dirname, 'meson', sp_kwargs)
        return self.get_subproject_dep(display_name, dirname, varname, kwargs)

    @FeatureNewKwargs('executable', '0.42.0', ['implib'])
    @permittedKwargs(permitted_kwargs['executable'])
    def func_executable(self, node, args, kwargs):
        return self.build_target(node, args, kwargs, ExecutableHolder)

    @permittedKwargs(permitted_kwargs['static_library'])
    def func_static_lib(self, node, args, kwargs):
        return self.build_target(node, args, kwargs, StaticLibraryHolder)

    @permittedKwargs(permitted_kwargs['shared_library'])
    def func_shared_lib(self, node, args, kwargs):
        holder = self.build_target(node, args, kwargs, SharedLibraryHolder)
        holder.held_object.shared_library_only = True
        return holder
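    # For orientation, a meson.build call that exercises the fallback path above
    # might look like the following sketch. The names 'zlib' and 'zlib_dep' are
    # placeholders for the subproject directory and the variable it defines,
    # i.e. the two-element list that get_subproject_infos() validates before
    # do_subproject() and get_subproject_dep() are invoked:
    #
    #   zdep = dependency('zlib', version : '>=1.2.8',
    #                     fallback : ['zlib', 'zlib_dep'])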
@permittedKwargs(permitted_kwargs['both_libraries']) def func_both_lib(self, node, args, kwargs): return self.build_both_libraries(node, args, kwargs) @FeatureNew('shared_module', '0.37.0') @permittedKwargs(permitted_kwargs['shared_module']) def func_shared_module(self, node, args, kwargs): return self.build_target(node, args, kwargs, SharedModuleHolder) @permittedKwargs(permitted_kwargs['library']) def func_library(self, node, args, kwargs): return self.build_library(node, args, kwargs) @permittedKwargs(permitted_kwargs['jar']) def func_jar(self, node, args, kwargs): return self.build_target(node, args, kwargs, JarHolder) @FeatureNewKwargs('build_target', '0.40.0', ['link_whole', 'override_options']) @permittedKwargs(permitted_kwargs['build_target']) def func_build_target(self, node, args, kwargs): if 'target_type' not in kwargs: raise InterpreterException('Missing target_type keyword argument') target_type = kwargs.pop('target_type') if target_type == 'executable': return self.build_target(node, args, kwargs, ExecutableHolder) elif target_type == 'shared_library': return self.build_target(node, args, kwargs, SharedLibraryHolder) elif target_type == 'shared_module': FeatureNew('build_target(target_type: \'shared_module\')', '0.51.0').use(self.subproject) return self.build_target(node, args, kwargs, SharedModuleHolder) elif target_type == 'static_library': return self.build_target(node, args, kwargs, StaticLibraryHolder) elif target_type == 'both_libraries': return self.build_both_libraries(node, args, kwargs) elif target_type == 'library': return self.build_library(node, args, kwargs) elif target_type == 'jar': return self.build_target(node, args, kwargs, JarHolder) else: raise InterpreterException('Unknown target_type.') @permittedKwargs(permitted_kwargs['vcs_tag']) def func_vcs_tag(self, node, args, kwargs): if 'input' not in kwargs or 'output' not in kwargs: raise InterpreterException('Keyword arguments input and output must exist') if 'fallback' not in kwargs: FeatureNew('Optional fallback in vcs_tag', '0.41.0').use(self.subproject) fallback = kwargs.pop('fallback', self.project_version) if not isinstance(fallback, str): raise InterpreterException('Keyword argument fallback must be a string.') replace_string = kwargs.pop('replace_string', '@VCS_TAG@') regex_selector = '(.*)' # default regex selector for custom command: use complete output vcs_cmd = kwargs.get('command', None) if vcs_cmd and not isinstance(vcs_cmd, list): vcs_cmd = [vcs_cmd] source_dir = os.path.normpath(os.path.join(self.environment.get_source_dir(), self.subdir)) if vcs_cmd: # Is the command an executable in path or maybe a script in the source tree? vcs_cmd[0] = shutil.which(vcs_cmd[0]) or os.path.join(source_dir, vcs_cmd[0]) else: vcs = mesonlib.detect_vcs(source_dir) if vcs: mlog.log('Found %s repository at %s' % (vcs['name'], vcs['wc_dir'])) vcs_cmd = vcs['get_rev'].split() regex_selector = vcs['rev_regex'] else: vcs_cmd = [' '] # executing this cmd will fail in vcstagger.py and force to use the fallback string # vcstagger.py parameters: infile, outfile, fallback, source_dir, replace_string, regex_selector, command... 
kwargs['command'] = self.environment.get_build_command() + \ ['--internal', 'vcstagger', '@INPUT0@', '@OUTPUT0@', fallback, source_dir, replace_string, regex_selector] + vcs_cmd kwargs.setdefault('build_by_default', True) kwargs.setdefault('build_always_stale', True) return self._func_custom_target_impl(node, [kwargs['output']], kwargs) @FeatureNew('subdir_done', '0.46.0') @stringArgs def func_subdir_done(self, node, args, kwargs): if len(kwargs) > 0: raise InterpreterException('exit does not take named arguments') if len(args) > 0: raise InterpreterException('exit does not take any arguments') raise SubdirDoneRequest() @stringArgs @FeatureNewKwargs('custom_target', '0.48.0', ['console']) @FeatureNewKwargs('custom_target', '0.47.0', ['install_mode', 'build_always_stale']) @FeatureNewKwargs('custom_target', '0.40.0', ['build_by_default']) @permittedKwargs(permitted_kwargs['custom_target']) def func_custom_target(self, node, args, kwargs): if len(args) != 1: raise InterpreterException('custom_target: Only one positional argument is allowed, and it must be a string name') if 'depfile' in kwargs and ('@BASENAME@' in kwargs['depfile'] or '@PLAINNAME@' in kwargs['depfile']): FeatureNew('substitutions in custom_target depfile', '0.47.0').use(self.subproject) return self._func_custom_target_impl(node, args, kwargs) def _func_custom_target_impl(self, node, args, kwargs): 'Implementation-only, without FeatureNew checks, for internal use' name = args[0] kwargs['install_mode'] = self._get_kwarg_install_mode(kwargs) if 'input' in kwargs: try: kwargs['input'] = self.source_strings_to_files(extract_as_list(kwargs, 'input')) except mesonlib.MesonException: mlog.warning('''Custom target input \'%s\' can\'t be converted to File object(s). This will become a hard error in the future.''' % kwargs['input'], location=self.current_node) tg = CustomTargetHolder(build.CustomTarget(name, self.subdir, self.subproject, kwargs, backend=self.backend), self) self.add_target(name, tg.held_object) return tg @permittedKwargs(permitted_kwargs['run_target']) def func_run_target(self, node, args, kwargs): if len(args) > 1: raise InvalidCode('Run_target takes only one positional argument: the target name.') elif len(args) == 1: if 'command' not in kwargs: raise InterpreterException('Missing "command" keyword argument') all_args = extract_as_list(kwargs, 'command') deps = extract_as_list(kwargs, 'depends', unholder=True) else: raise InterpreterException('Run_target needs at least one positional argument.') cleaned_args = [] for i in listify(all_args, unholder=True): if not isinstance(i, (str, build.BuildTarget, build.CustomTarget, dependencies.ExternalProgram, mesonlib.File)): mlog.debug('Wrong type:', str(i)) raise InterpreterException('Invalid argument to run_target.') if isinstance(i, dependencies.ExternalProgram) and not i.found(): raise InterpreterException('Tried to use non-existing executable {!r}'.format(i.name)) cleaned_args.append(i) name = args[0] if not isinstance(name, str): raise InterpreterException('First argument must be a string.') cleaned_deps = [] for d in deps: if not isinstance(d, (build.BuildTarget, build.CustomTarget)): raise InterpreterException('Depends items must be build targets.') cleaned_deps.append(d) command, *cmd_args = cleaned_args tg = RunTargetHolder(build.RunTarget(name, command, cmd_args, cleaned_deps, self.subdir, self.subproject), self) self.add_target(name, tg.held_object) full_name = (self.subproject, name) assert(full_name not in self.build.run_target_names) 
self.build.run_target_names.add(full_name) return tg @FeatureNew('alias_target', '0.52.0') @noKwargs def func_alias_target(self, node, args, kwargs): if len(args) < 2: raise InvalidCode('alias_target takes at least 2 arguments.') name = args[0] if not isinstance(name, str): raise InterpreterException('First argument must be a string.') deps = listify(args[1:], unholder=True) for d in deps: if not isinstance(d, (build.BuildTarget, build.CustomTarget)): raise InterpreterException('Depends items must be build targets.') tg = RunTargetHolder(build.AliasTarget(name, deps, self.subdir, self.subproject), self) self.add_target(name, tg.held_object) return tg @permittedKwargs(permitted_kwargs['generator']) def func_generator(self, node, args, kwargs): gen = GeneratorHolder(self, args, kwargs) self.generators.append(gen) return gen @FeatureNewKwargs('benchmark', '0.46.0', ['depends']) @FeatureNewKwargs('benchmark', '0.52.0', ['priority']) @permittedKwargs(permitted_kwargs['benchmark']) def func_benchmark(self, node, args, kwargs): # is_parallel isn't valid here, so make sure it isn't passed if 'is_parallel' in kwargs: del kwargs['is_parallel'] self.add_test(node, args, kwargs, False) @FeatureNewKwargs('test', '0.46.0', ['depends']) @FeatureNewKwargs('test', '0.52.0', ['priority']) @permittedKwargs(permitted_kwargs['test']) def func_test(self, node, args, kwargs): self.add_test(node, args, kwargs, True) def unpack_env_kwarg(self, kwargs) -> build.EnvironmentVariables: envlist = kwargs.get('env', EnvironmentVariablesHolder()) if isinstance(envlist, EnvironmentVariablesHolder): env = envlist.held_object elif isinstance(envlist, dict): FeatureNew('environment dictionary', '0.52.0').use(self.subproject) env = EnvironmentVariablesHolder(envlist) env = env.held_object else: envlist = listify(envlist) # Convert from array to environment object env = EnvironmentVariablesHolder(envlist) env = env.held_object return env def add_test(self, node, args, kwargs, is_base_test): if len(args) != 2: raise InterpreterException('Incorrect number of arguments') if not isinstance(args[0], str): raise InterpreterException('First argument of test must be a string.') exe = args[1] if not isinstance(exe, (ExecutableHolder, JarHolder, ExternalProgramHolder)): if isinstance(exe, mesonlib.File): exe = self.func_find_program(node, args[1], {}) else: raise InterpreterException('Second argument must be executable.') par = kwargs.get('is_parallel', True) if not isinstance(par, bool): raise InterpreterException('Keyword argument is_parallel must be a boolean.') cmd_args = extract_as_list(kwargs, 'args', unholder=True) for i in cmd_args: if not isinstance(i, (str, mesonlib.File, build.Target)): raise InterpreterException('Command line arguments must be strings, files or targets.') env = self.unpack_env_kwarg(kwargs) should_fail = kwargs.get('should_fail', False) if not isinstance(should_fail, bool): raise InterpreterException('Keyword argument should_fail must be a boolean.') timeout = kwargs.get('timeout', 30) if 'workdir' in kwargs: workdir = kwargs['workdir'] if not isinstance(workdir, str): raise InterpreterException('Workdir keyword argument must be a string.') if not os.path.isabs(workdir): raise InterpreterException('Workdir keyword argument must be an absolute path.') else: workdir = None if not isinstance(timeout, int): raise InterpreterException('Timeout must be an integer.') protocol = kwargs.get('protocol', 'exitcode') if protocol not in ('exitcode', 'tap'): raise InterpreterException('Protocol must be "exitcode" or 
"tap".') suite = [] prj = self.subproject if self.is_subproject() else self.build.project_name for s in mesonlib.stringlistify(kwargs.get('suite', '')): if len(s) > 0: s = ':' + s suite.append(prj.replace(' ', '_').replace(':', '_') + s) depends = extract_as_list(kwargs, 'depends', unholder=True) for dep in depends: if not isinstance(dep, (build.CustomTarget, build.BuildTarget)): raise InterpreterException('Depends items must be build targets.') priority = kwargs.get('priority', 0) if not isinstance(priority, int): raise InterpreterException('Keyword argument priority must be an integer.') t = Test(args[0], prj, suite, exe.held_object, depends, par, cmd_args, env, should_fail, timeout, workdir, protocol, priority) if is_base_test: self.build.tests.append(t) mlog.debug('Adding test', mlog.bold(args[0], True)) else: self.build.benchmarks.append(t) mlog.debug('Adding benchmark', mlog.bold(args[0], True)) @FeatureNewKwargs('install_headers', '0.47.0', ['install_mode']) @permittedKwargs(permitted_kwargs['install_headers']) def func_install_headers(self, node, args, kwargs): source_files = self.source_strings_to_files(args) kwargs['install_mode'] = self._get_kwarg_install_mode(kwargs) h = Headers(source_files, kwargs) self.build.headers.append(h) return h @FeatureNewKwargs('install_man', '0.47.0', ['install_mode']) @permittedKwargs(permitted_kwargs['install_man']) def func_install_man(self, node, args, kwargs): fargs = self.source_strings_to_files(args) kwargs['install_mode'] = self._get_kwarg_install_mode(kwargs) m = Man(fargs, kwargs) self.build.man.append(m) return m @FeatureNewKwargs('subdir', '0.44.0', ['if_found']) @permittedKwargs(permitted_kwargs['subdir']) def func_subdir(self, node, args, kwargs): self.validate_arguments(args, 1, [str]) mesonlib.check_direntry_issues(args) if '..' in args[0]: raise InvalidArguments('Subdir contains ..') if self.subdir == '' and args[0] == self.subproject_dir: raise InvalidArguments('Must not go into subprojects dir with subdir(), use subproject() instead.') if self.subdir == '' and args[0].startswith('meson-'): raise InvalidArguments('The "meson-" prefix is reserved and cannot be used for top-level subdir().') for i in mesonlib.extract_as_list(kwargs, 'if_found'): if not hasattr(i, 'found_method'): raise InterpreterException('Object used in if_found does not have a found method.') if not i.found_method([], {}): return prev_subdir = self.subdir subdir = os.path.join(prev_subdir, args[0]) if os.path.isabs(subdir): raise InvalidArguments('Subdir argument must be a relative path.') absdir = os.path.join(self.environment.get_source_dir(), subdir) symlinkless_dir = os.path.realpath(absdir) if symlinkless_dir in self.visited_subdirs: raise InvalidArguments('Tried to enter directory "%s", which has already been visited.' 
% subdir) self.visited_subdirs[symlinkless_dir] = True self.subdir = subdir os.makedirs(os.path.join(self.environment.build_dir, subdir), exist_ok=True) buildfilename = os.path.join(self.subdir, environment.build_filename) self.build_def_files.append(buildfilename) absname = os.path.join(self.environment.get_source_dir(), buildfilename) if not os.path.isfile(absname): self.subdir = prev_subdir raise InterpreterException('Non-existent build file {!r}'.format(buildfilename)) with open(absname, encoding='utf8') as f: code = f.read() assert(isinstance(code, str)) try: codeblock = mparser.Parser(code, self.subdir).parse() except mesonlib.MesonException as me: me.file = buildfilename raise me try: self.evaluate_codeblock(codeblock) except SubdirDoneRequest: pass self.subdir = prev_subdir def _get_kwarg_install_mode(self, kwargs): if kwargs.get('install_mode', None) is None: return None install_mode = [] mode = mesonlib.typeslistify(kwargs.get('install_mode', []), (str, int)) for m in mode: # We skip any arguments that are set to `false` if m is False: m = None install_mode.append(m) if len(install_mode) > 3: raise InvalidArguments('Keyword argument install_mode takes at ' 'most 3 arguments.') if len(install_mode) > 0 and install_mode[0] is not None and \ not isinstance(install_mode[0], str): raise InvalidArguments('Keyword argument install_mode requires the ' 'permissions arg to be a string or false') return FileMode(*install_mode) @FeatureNewKwargs('install_data', '0.46.0', ['rename']) @FeatureNewKwargs('install_data', '0.38.0', ['install_mode']) @permittedKwargs(permitted_kwargs['install_data']) def func_install_data(self, node, args, kwargs): kwsource = mesonlib.stringlistify(kwargs.get('sources', [])) raw_sources = args + kwsource sources = [] source_strings = [] for s in raw_sources: if isinstance(s, mesonlib.File): sources.append(s) elif isinstance(s, str): source_strings.append(s) else: raise InvalidArguments('Argument {!r} must be string or file.'.format(s)) sources += self.source_strings_to_files(source_strings) install_dir = kwargs.get('install_dir', None) if not isinstance(install_dir, (str, type(None))): raise InvalidArguments('Keyword argument install_dir not a string.') install_mode = self._get_kwarg_install_mode(kwargs) rename = kwargs.get('rename', None) data = DataHolder(build.Data(sources, install_dir, install_mode, rename)) self.build.data.append(data.held_object) return data @FeatureNewKwargs('install_subdir', '0.42.0', ['exclude_files', 'exclude_directories']) @FeatureNewKwargs('install_subdir', '0.38.0', ['install_mode']) @permittedKwargs(permitted_kwargs['install_subdir']) @stringArgs def func_install_subdir(self, node, args, kwargs): if len(args) != 1: raise InvalidArguments('Install_subdir requires exactly one argument.') subdir = args[0] if 'install_dir' not in kwargs: raise InvalidArguments('Missing keyword argument install_dir') install_dir = kwargs['install_dir'] if not isinstance(install_dir, str): raise InvalidArguments('Keyword argument install_dir not a string.') if 'strip_directory' in kwargs: if not isinstance(kwargs['strip_directory'], bool): raise InterpreterException('"strip_directory" keyword must be a boolean.') strip_directory = kwargs['strip_directory'] else: strip_directory = False if 'exclude_files' in kwargs: exclude = extract_as_list(kwargs, 'exclude_files') for f in exclude: if not isinstance(f, str): raise InvalidArguments('Exclude argument not a string.') elif os.path.isabs(f): raise InvalidArguments('Exclude argument cannot be absolute.') 
exclude_files = set(exclude) else: exclude_files = set() if 'exclude_directories' in kwargs: exclude = extract_as_list(kwargs, 'exclude_directories') for d in exclude: if not isinstance(d, str): raise InvalidArguments('Exclude argument not a string.') elif os.path.isabs(d): raise InvalidArguments('Exclude argument cannot be absolute.') exclude_directories = set(exclude) else: exclude_directories = set() exclude = (exclude_files, exclude_directories) install_mode = self._get_kwarg_install_mode(kwargs) idir = InstallDir(self.subdir, subdir, install_dir, install_mode, exclude, strip_directory) self.build.install_dirs.append(idir) return idir @FeatureNewKwargs('configure_file', '0.47.0', ['copy', 'output_format', 'install_mode', 'encoding']) @FeatureNewKwargs('configure_file', '0.46.0', ['format']) @FeatureNewKwargs('configure_file', '0.41.0', ['capture']) @FeatureNewKwargs('configure_file', '0.50.0', ['install']) @FeatureNewKwargs('configure_file', '0.52.0', ['depfile']) @permittedKwargs(permitted_kwargs['configure_file']) def func_configure_file(self, node, args, kwargs): if len(args) > 0: raise InterpreterException("configure_file takes only keyword arguments.") if 'output' not in kwargs: raise InterpreterException('Required keyword argument "output" not defined.') actions = set(['configuration', 'command', 'copy']).intersection(kwargs.keys()) if len(actions) == 0: raise InterpreterException('Must specify an action with one of these ' 'keyword arguments: \'configuration\', ' '\'command\', or \'copy\'.') elif len(actions) == 2: raise InterpreterException('Must not specify both {!r} and {!r} ' 'keyword arguments since they are ' 'mutually exclusive.'.format(*actions)) elif len(actions) == 3: raise InterpreterException('Must specify one of {!r}, {!r}, and ' '{!r} keyword arguments since they are ' 'mutually exclusive.'.format(*actions)) if 'capture' in kwargs: if not isinstance(kwargs['capture'], bool): raise InterpreterException('"capture" keyword must be a boolean.') if 'command' not in kwargs: raise InterpreterException('"capture" keyword requires "command" keyword.') if 'format' in kwargs: fmt = kwargs['format'] if not isinstance(fmt, str): raise InterpreterException('"format" keyword must be a string.') else: fmt = 'meson' if fmt not in ('meson', 'cmake', 'cmake@'): raise InterpreterException('"format" possible values are "meson", "cmake" or "cmake@".') if 'output_format' in kwargs: output_format = kwargs['output_format'] if not isinstance(output_format, str): raise InterpreterException('"output_format" keyword must be a string.') else: output_format = 'c' if output_format not in ('c', 'nasm'): raise InterpreterException('"format" possible values are "c" or "nasm".') if 'depfile' in kwargs: depfile = kwargs['depfile'] if not isinstance(depfile, str): raise InterpreterException('depfile file name must be a string') else: depfile = None # Validate input inputs = self.source_strings_to_files(extract_as_list(kwargs, 'input')) inputs_abs = [] for f in inputs: if isinstance(f, mesonlib.File): inputs_abs.append(f.absolute_path(self.environment.source_dir, self.environment.build_dir)) else: raise InterpreterException('Inputs can only be strings or file objects') # Validate output output = kwargs['output'] if not isinstance(output, str): raise InterpreterException('Output file name must be a string') if inputs_abs: values = mesonlib.get_filenames_templates_dict(inputs_abs, None) outputs = mesonlib.substitute_values([output], values) output = outputs[0] if depfile: depfile = 
mesonlib.substitute_values([depfile], values)[0] ofile_rpath = os.path.join(self.subdir, output) if ofile_rpath in self.configure_file_outputs: mesonbuildfile = os.path.join(self.subdir, 'meson.build') current_call = "{}:{}".format(mesonbuildfile, self.current_lineno) first_call = "{}:{}".format(mesonbuildfile, self.configure_file_outputs[ofile_rpath]) mlog.warning('Output file', mlog.bold(ofile_rpath, True), 'for configure_file() at', current_call, 'overwrites configure_file() output at', first_call) else: self.configure_file_outputs[ofile_rpath] = self.current_lineno if os.path.dirname(output) != '': raise InterpreterException('Output file name must not contain a subdirectory.') (ofile_path, ofile_fname) = os.path.split(os.path.join(self.subdir, output)) ofile_abs = os.path.join(self.environment.build_dir, ofile_path, ofile_fname) # Perform the appropriate action if 'configuration' in kwargs: conf = kwargs['configuration'] if isinstance(conf, dict): FeatureNew('configure_file.configuration dictionary', '0.49.0').use(self.subproject) conf = ConfigurationDataHolder(self.subproject, conf) elif not isinstance(conf, ConfigurationDataHolder): raise InterpreterException('Argument "configuration" is not of type configuration_data') mlog.log('Configuring', mlog.bold(output), 'using configuration') if len(inputs) > 1: raise InterpreterException('At most one input file can given in configuration mode') if inputs: os.makedirs(os.path.join(self.environment.build_dir, self.subdir), exist_ok=True) file_encoding = kwargs.setdefault('encoding', 'utf-8') missing_variables, confdata_useless = \ mesonlib.do_conf_file(inputs_abs[0], ofile_abs, conf.held_object, fmt, file_encoding) if missing_variables: var_list = ", ".join(map(repr, sorted(missing_variables))) mlog.warning( "The variable(s) %s in the input file '%s' are not " "present in the given configuration data." % ( var_list, inputs[0]), location=node) if confdata_useless: ifbase = os.path.basename(inputs_abs[0]) mlog.warning('Got an empty configuration_data() object and found no ' 'substitutions in the input file {!r}. If you want to ' 'copy a file to the build dir, use the \'copy:\' keyword ' 'argument added in 0.47.0'.format(ifbase), location=node) else: mesonlib.dump_conf_header(ofile_abs, conf.held_object, output_format) conf.mark_used() elif 'command' in kwargs: if len(inputs) > 1: FeatureNew('multiple inputs in configure_file()', '0.52.0').use(self.subproject) # We use absolute paths for input and output here because the cwd # that the command is run from is 'unspecified', so it could change. # Currently it's builddir/subdir for in_builddir else srcdir/subdir. values = mesonlib.get_filenames_templates_dict(inputs_abs, [ofile_abs]) if depfile: depfile = os.path.join(self.environment.get_scratch_dir(), depfile) values['@DEPFILE@'] = depfile # Substitute @INPUT@, @OUTPUT@, etc here. 
cmd = mesonlib.substitute_values(kwargs['command'], values) mlog.log('Configuring', mlog.bold(output), 'with command') res = self.run_command_impl(node, cmd, {}, True) if res.returncode != 0: raise InterpreterException('Running configure command failed.\n%s\n%s' % (res.stdout, res.stderr)) if 'capture' in kwargs and kwargs['capture']: dst_tmp = ofile_abs + '~' file_encoding = kwargs.setdefault('encoding', 'utf-8') with open(dst_tmp, 'w', encoding=file_encoding) as f: f.writelines(res.stdout) if inputs_abs: shutil.copymode(inputs_abs[0], dst_tmp) mesonlib.replace_if_different(ofile_abs, dst_tmp) if depfile: mlog.log('Reading depfile:', mlog.bold(depfile)) with open(depfile, 'r') as f: df = DepFile(f.readlines()) deps = df.get_all_dependencies(ofile_fname) for dep in deps: if dep not in self.build_def_files: self.build_def_files.append(dep) elif 'copy' in kwargs: if len(inputs_abs) != 1: raise InterpreterException('Exactly one input file must be given in copy mode') os.makedirs(os.path.join(self.environment.build_dir, self.subdir), exist_ok=True) shutil.copyfile(inputs_abs[0], ofile_abs) shutil.copymode(inputs_abs[0], ofile_abs) else: # Not reachable raise AssertionError # If the input is a source file, add it to the list of files that we # need to reconfigure on when they change. for f in chain(inputs, kwargs.get('command', [])): if isinstance(f, mesonlib.File) and not f.is_built: # Normalize the path of the conffile (relative to the # source root) to avoid duplicates. This is especially # important to convert '/' to '\' on Windows conffile = os.path.normpath(f.relative_name()) if conffile not in self.build_def_files: self.build_def_files.append(conffile) # Install file if requested, we check for the empty string # for backwards compatibility. That was the behaviour before # 0.45.0 so preserve it. idir = kwargs.get('install_dir', '') if idir is False: idir = '' mlog.deprecation('Please use the new `install:` kwarg instead of passing ' '`false` to `install_dir:`', location=node) if not isinstance(idir, str): if isinstance(idir, list) and len(idir) == 0: mlog.deprecation('install_dir: kwarg must be a string and not an empty array. ' 'Please use the install: kwarg to enable or disable installation. 
' 'This will be a hard error in the next release.') else: raise InterpreterException('"install_dir" must be a string') install = kwargs.get('install', idir != '') if not isinstance(install, bool): raise InterpreterException('"install" must be a boolean') if install: if not idir: raise InterpreterException('"install_dir" must be specified ' 'when "install" in a configure_file ' 'is true') cfile = mesonlib.File.from_built_file(ofile_path, ofile_fname) install_mode = self._get_kwarg_install_mode(kwargs) self.build.data.append(build.Data([cfile], idir, install_mode)) return mesonlib.File.from_built_file(self.subdir, output) def extract_incdirs(self, kwargs): prospectives = listify(kwargs.get('include_directories', []), unholder=True) result = [] for p in prospectives: if isinstance(p, build.IncludeDirs): result.append(p) elif isinstance(p, str): result.append(self.build_incdir_object([p]).held_object) else: raise InterpreterException('Include directory objects can only be created from strings or include directories.') return result @permittedKwargs(permitted_kwargs['include_directories']) @stringArgs def func_include_directories(self, node, args, kwargs): return self.build_incdir_object(args, kwargs.get('is_system', False)) def build_incdir_object(self, incdir_strings, is_system=False): if not isinstance(is_system, bool): raise InvalidArguments('Is_system must be boolean.') src_root = self.environment.get_source_dir() build_root = self.environment.get_build_dir() absbase_src = os.path.join(src_root, self.subdir) absbase_build = os.path.join(build_root, self.subdir) for a in incdir_strings: if a.startswith(src_root): raise InvalidArguments('''Tried to form an absolute path to a source dir. You should not do that but use relative paths instead. To get include path to any directory relative to the current dir do incdir = include_directories(dirname) After this incdir will contain both the current source dir as well as the corresponding build dir. It can then be used in any subdirectory and Meson will take care of all the busywork to make paths work. Dirname can even be '.' to mark the current directory. Though you should remember that the current source and build directories are always put in the include directories by default so you only need to do include_directories('.') if you intend to use the result in a different subdirectory. ''') absdir_src = os.path.join(absbase_src, a) absdir_build = os.path.join(absbase_build, a) if not os.path.isdir(absdir_src) and not os.path.isdir(absdir_build): raise InvalidArguments('Include dir %s does not exist.' 
% a) i = IncludeDirsHolder(build.IncludeDirs(self.subdir, incdir_strings, is_system)) return i @permittedKwargs(permitted_kwargs['add_test_setup']) @stringArgs def func_add_test_setup(self, node, args, kwargs): if len(args) != 1: raise InterpreterException('Add_test_setup needs one argument for the setup name.') setup_name = args[0] if re.fullmatch('([_a-zA-Z][_0-9a-zA-Z]*:)?[_a-zA-Z][_0-9a-zA-Z]*', setup_name) is None: raise InterpreterException('Setup name may only contain alphanumeric characters.') if ":" not in setup_name: setup_name = (self.subproject if self.subproject else self.build.project_name) + ":" + setup_name try: inp = extract_as_list(kwargs, 'exe_wrapper', unholder=True) exe_wrapper = [] for i in inp: if isinstance(i, str): exe_wrapper.append(i) elif isinstance(i, dependencies.ExternalProgram): if not i.found(): raise InterpreterException('Tried to use non-found executable.') exe_wrapper += i.get_command() else: raise InterpreterException('Exe wrapper can only contain strings or external binaries.') except KeyError: exe_wrapper = None gdb = kwargs.get('gdb', False) if not isinstance(gdb, bool): raise InterpreterException('Gdb option must be a boolean') timeout_multiplier = kwargs.get('timeout_multiplier', 1) if not isinstance(timeout_multiplier, int): raise InterpreterException('Timeout multiplier must be a number.') is_default = kwargs.get('is_default', False) if not isinstance(is_default, bool): raise InterpreterException('is_default option must be a boolean') if is_default: if self.build.test_setup_default_name is not None: raise InterpreterException('\'%s\' is already set as default. ' 'is_default can be set to true only once' % self.build.test_setup_default_name) self.build.test_setup_default_name = setup_name env = self.unpack_env_kwarg(kwargs) self.build.test_setups[setup_name] = build.TestSetup(exe_wrapper, gdb, timeout_multiplier, env) @permittedKwargs(permitted_kwargs['add_global_arguments']) @stringArgs def func_add_global_arguments(self, node, args, kwargs): for_machine = self.machine_from_native_kwarg(kwargs) self.add_global_arguments(node, self.build.global_args[for_machine], args, kwargs) @permittedKwargs(permitted_kwargs['add_global_link_arguments']) @stringArgs def func_add_global_link_arguments(self, node, args, kwargs): for_machine = self.machine_from_native_kwarg(kwargs) self.add_global_arguments(node, self.build.global_link_args[for_machine], args, kwargs) @permittedKwargs(permitted_kwargs['add_project_arguments']) @stringArgs def func_add_project_arguments(self, node, args, kwargs): for_machine = self.machine_from_native_kwarg(kwargs) self.add_project_arguments(node, self.build.projects_args[for_machine], args, kwargs) @permittedKwargs(permitted_kwargs['add_project_link_arguments']) @stringArgs def func_add_project_link_arguments(self, node, args, kwargs): for_machine = self.machine_from_native_kwarg(kwargs) self.add_project_arguments(node, self.build.projects_link_args[for_machine], args, kwargs) def warn_about_builtin_args(self, args): warnargs = ('/W1', '/W2', '/W3', '/W4', '/Wall', '-Wall', '-Wextra', '-Wpedantic') optargs = ('-O0', '-O2', '-O3', '-Os', '/O1', '/O2', '/Os') for arg in args: if arg in warnargs: mlog.warning('Consider using the built-in warning_level option instead of using "{}".'.format(arg), location=self.current_node) elif arg in optargs: mlog.warning('Consider using the built-in optimization level instead of using "{}".'.format(arg), location=self.current_node) elif arg == '-g': mlog.warning('Consider using the built-in debug 
option instead of using "{}".'.format(arg), location=self.current_node) elif arg == '-pipe': mlog.warning("You don't need to add -pipe, Meson will use it automatically when it is available.", location=self.current_node) elif arg.startswith('-fsanitize'): mlog.warning('Consider using the built-in option for sanitizers instead of using "{}".'.format(arg), location=self.current_node) elif arg.startswith('-std=') or arg.startswith('/std:'): mlog.warning('Consider using the built-in option for language standard version instead of using "{}".'.format(arg), location=self.current_node) def add_global_arguments(self, node, argsdict, args, kwargs): if self.is_subproject(): msg = 'Function \'{}\' cannot be used in subprojects because ' \ 'there is no way to make that reliable.\nPlease only call ' \ 'this if is_subproject() returns false. Alternatively, ' \ 'define a variable that\ncontains your language-specific ' \ 'arguments and add it to the appropriate *_args kwarg ' \ 'in each target.'.format(node.func_name) raise InvalidCode(msg) frozen = self.project_args_frozen or self.global_args_frozen self.add_arguments(node, argsdict, frozen, args, kwargs) def add_project_arguments(self, node, argsdict, args, kwargs): if self.subproject not in argsdict: argsdict[self.subproject] = {} self.add_arguments(node, argsdict[self.subproject], self.project_args_frozen, args, kwargs) def add_arguments(self, node, argsdict, args_frozen, args, kwargs): if args_frozen: msg = 'Tried to use \'{}\' after a build target has been declared.\n' \ 'This is not permitted. Please declare all ' \ 'arguments before your targets.'.format(node.func_name) raise InvalidCode(msg) if 'language' not in kwargs: raise InvalidCode('Missing language definition in {}'.format(node.func_name)) self.warn_about_builtin_args(args) for lang in mesonlib.stringlistify(kwargs['language']): lang = lang.lower() argsdict[lang] = argsdict.get(lang, []) + args @noKwargs @noArgsFlattening def func_environment(self, node, args, kwargs): if len(args) > 1: raise InterpreterException('environment takes only one optional positional arguments') elif len(args) == 1: FeatureNew('environment positional arguments', '0.52.0').use(self.subproject) initial_values = args[0] if not isinstance(initial_values, dict) and not isinstance(initial_values, list): raise InterpreterException('environment first argument must be a dictionary or a list') else: initial_values = {} return EnvironmentVariablesHolder(initial_values) @stringArgs @noKwargs def func_join_paths(self, node, args, kwargs): return self.join_path_strings(args) def run(self): super().run() mlog.log('Build targets in project:', mlog.bold(str(len(self.build.targets)))) FeatureNew.report(self.subproject) FeatureDeprecated.report(self.subproject) if not self.is_subproject(): self.print_extra_warnings() def print_extra_warnings(self): # TODO cross compilation for c in self.coredata.compilers.host.values(): if c.get_id() == 'clang': self.check_clang_asan_lundef() break def check_clang_asan_lundef(self): if 'b_lundef' not in self.coredata.base_options: return if 'b_sanitize' not in self.coredata.base_options: return if (self.coredata.base_options['b_lundef'].value and self.coredata.base_options['b_sanitize'].value != 'none'): mlog.warning('''Trying to use {} sanitizer on Clang with b_lundef. This will probably not work. 
Try setting b_lundef to false instead.'''.format(self.coredata.base_options['b_sanitize'].value), location=self.current_node) def evaluate_subproject_info(self, path_from_source_root, subproject_dirname): depth = 0 subproj_name = '' segs = PurePath(path_from_source_root).parts segs_spd = PurePath(subproject_dirname).parts while segs and segs[0] == segs_spd[0]: if len(segs_spd) == 1: subproj_name = segs[1] segs = segs[2:] depth += 1 else: segs_spd = segs_spd[1:] segs = segs[1:] return (depth, subproj_name) # Check that the indicated file is within the same subproject # as we currently are. This is to stop people doing # nasty things like: # # f = files('../../master_src/file.c') # # Note that this is validated only when the file # object is generated. The result can be used in a different # subproject than it is defined in (due to e.g. a # declare_dependency). def validate_within_subproject(self, subdir, fname): norm = os.path.normpath(os.path.join(subdir, fname)) if os.path.isabs(norm): if not norm.startswith(self.environment.source_dir): # Grabbing files outside the source tree is ok. # This is for vendor stuff like: # # /opt/vendorsdk/src/file_with_license_restrictions.c return norm = os.path.relpath(norm, self.environment.source_dir) assert(not os.path.isabs(norm)) (num_sps, sproj_name) = self.evaluate_subproject_info(norm, self.subproject_dir) plain_filename = os.path.basename(norm) if num_sps == 0: if not self.is_subproject(): return raise InterpreterException('Sandbox violation: Tried to grab file %s from a different subproject.' % plain_filename) if num_sps > 1: raise InterpreterException('Sandbox violation: Tried to grab file %s from a nested subproject.' % plain_filename) if sproj_name != self.subproject_directory_name: raise InterpreterException('Sandbox violation: Tried to grab file %s from a different subproject.' % plain_filename) def source_strings_to_files(self, sources): results = [] mesonlib.check_direntry_issues(sources) if not isinstance(sources, list): sources = [sources] for s in sources: if isinstance(s, (mesonlib.File, GeneratedListHolder, TargetHolder, CustomTargetIndexHolder, GeneratedObjectsHolder)): pass elif isinstance(s, str): self.validate_within_subproject(self.subdir, s) s = mesonlib.File.from_source_file(self.environment.source_dir, self.subdir, s) else: raise InterpreterException('Source item is {!r} instead of ' 'string or File-type object'.format(s)) results.append(s) return results def add_target(self, name, tobj): if name == '': raise InterpreterException('Target name must not be empty.') if name.strip() == '': raise InterpreterException('Target name must not consist only of whitespace.') if name.startswith('meson-'): raise InvalidArguments("Target names starting with 'meson-' are reserved " "for Meson's internal use. Please rename.") if name in coredata.forbidden_target_names: raise InvalidArguments("Target name '%s' is reserved for Meson's " "internal use. Please rename." % name) # To permit an executable and a shared library to have the # same name, such as "foo.exe" and "libfoo.a". idname = tobj.get_id() if idname in self.build.targets: raise InvalidCode('Tried to create target "%s", but a target of that name already exists.' 
% name) self.build.targets[idname] = tobj if idname not in self.coredata.target_guids: self.coredata.target_guids[idname] = str(uuid.uuid4()).upper() @FeatureNew('both_libraries', '0.46.0') def build_both_libraries(self, node, args, kwargs): shared_holder = self.build_target(node, args, kwargs, SharedLibraryHolder) # Check if user forces non-PIC static library. pic = True if 'pic' in kwargs: pic = kwargs['pic'] elif 'b_staticpic' in self.environment.coredata.base_options: pic = self.environment.coredata.base_options['b_staticpic'].value if pic: # Exclude sources from args and kwargs to avoid building them twice static_args = [args[0]] static_kwargs = kwargs.copy() static_kwargs['sources'] = [] static_kwargs['objects'] = shared_holder.held_object.extract_all_objects() else: static_args = args static_kwargs = kwargs static_holder = self.build_target(node, static_args, static_kwargs, StaticLibraryHolder) return BothLibrariesHolder(shared_holder, static_holder, self) def build_library(self, node, args, kwargs): default_library = self.coredata.get_builtin_option('default_library') if default_library == 'shared': return self.build_target(node, args, kwargs, SharedLibraryHolder) elif default_library == 'static': return self.build_target(node, args, kwargs, StaticLibraryHolder) elif default_library == 'both': return self.build_both_libraries(node, args, kwargs) else: raise InterpreterException('Unknown default_library value: %s.', default_library) def build_target(self, node, args, kwargs, targetholder): @FeatureNewKwargs('build target', '0.42.0', ['rust_crate_type', 'build_rpath', 'implicit_include_directories']) @FeatureNewKwargs('build target', '0.41.0', ['rust_args']) @FeatureNewKwargs('build target', '0.40.0', ['build_by_default']) @FeatureNewKwargs('build target', '0.48.0', ['gnu_symbol_visibility']) def build_target_decorator_caller(self, node, args, kwargs): return True build_target_decorator_caller(self, node, args, kwargs) if not args: raise InterpreterException('Target does not have a name.') name, *sources = args for_machine = self.machine_from_native_kwarg(kwargs) if 'sources' in kwargs: sources += listify(kwargs['sources']) sources = self.source_strings_to_files(sources) objs = extract_as_list(kwargs, 'objects') kwargs['dependencies'] = extract_as_list(kwargs, 'dependencies') kwargs['install_mode'] = self._get_kwarg_install_mode(kwargs) if 'extra_files' in kwargs: ef = extract_as_list(kwargs, 'extra_files') kwargs['extra_files'] = self.source_strings_to_files(ef) self.check_sources_exist(os.path.join(self.source_root, self.subdir), sources) if targetholder is ExecutableHolder: targetclass = build.Executable elif targetholder is SharedLibraryHolder: targetclass = build.SharedLibrary elif targetholder is SharedModuleHolder: targetclass = build.SharedModule elif targetholder is StaticLibraryHolder: targetclass = build.StaticLibrary elif targetholder is JarHolder: targetclass = build.Jar else: mlog.debug('Unknown target type:', str(targetholder)) raise RuntimeError('Unreachable code') self.kwarg_strings_to_includedirs(kwargs) # Filter out kwargs from other target types. For example 'soversion' # passed to library() when default_library == 'static'. 
kwargs = {k: v for k, v in kwargs.items() if k in targetclass.known_kwargs} kwargs['include_directories'] = self.extract_incdirs(kwargs) target = targetclass(name, self.subdir, self.subproject, for_machine, sources, objs, self.environment, kwargs) if not self.environment.machines.matches_build_machine(for_machine): self.add_cross_stdlib_info(target) l = targetholder(target, self) self.add_target(name, l.held_object) self.project_args_frozen = True return l def kwarg_strings_to_includedirs(self, kwargs): if 'd_import_dirs' in kwargs: items = mesonlib.extract_as_list(kwargs, 'd_import_dirs') cleaned_items = [] for i in items: if isinstance(i, str): # BW compatibility. This was permitted so we must support it # for a few releases so people can transition to "correct" # path declarations. if os.path.normpath(i).startswith(self.environment.get_source_dir()): mlog.warning('''Building a path to the source dir is not supported. Use a relative path instead. This will become a hard error in the future.''', location=self.current_node) i = os.path.relpath(i, os.path.join(self.environment.get_source_dir(), self.subdir)) i = self.build_incdir_object([i]) cleaned_items.append(i) kwargs['d_import_dirs'] = cleaned_items def get_used_languages(self, target): result = {} for i in target.sources: # TODO other platforms for lang, c in self.coredata.compilers.host.items(): if c.can_compile(i): result[lang] = True break return result def add_cross_stdlib_info(self, target): if target.for_machine != MachineChoice.HOST: return for l in self.get_used_languages(target): props = self.environment.properties.host if props.has_stdlib(l) \ and self.subproject != props.get_stdlib(l)[0]: target.add_deps(self.build.stdlibs.host[l]) def check_sources_exist(self, subdir, sources): for s in sources: if not isinstance(s, str): continue # This means a generated source and they always exist. fname = os.path.join(subdir, s) if not os.path.isfile(fname): raise InterpreterException('Tried to add non-existing source file %s.' 
% s) # Only permit object extraction from the same subproject def validate_extraction(self, buildtarget): if not self.subdir.startswith(self.subproject_dir): if buildtarget.subdir.startswith(self.subproject_dir): raise InterpreterException('Tried to extract objects from a subproject target.') else: if not buildtarget.subdir.startswith(self.subproject_dir): raise InterpreterException('Tried to extract objects from the main project from a subproject.') if self.subdir.split('/')[1] != buildtarget.subdir.split('/')[1]: raise InterpreterException('Tried to extract objects from a different subproject.') def check_contains(self, obj, args): if len(args) != 1: raise InterpreterException('Contains method takes exactly one argument.') item = args[0] for element in obj: if isinstance(element, list): found = self.check_contains(element, args) if found: return True if element == item: return True return False def is_subproject(self): return self.subproject != '' @noKwargs @noArgsFlattening def func_set_variable(self, node, args, kwargs): if len(args) != 2: raise InvalidCode('Set_variable takes two arguments.') varname, value = args self.set_variable(varname, value) @noKwargs @noArgsFlattening def func_get_variable(self, node, args, kwargs): if len(args) < 1 or len(args) > 2: raise InvalidCode('Get_variable takes one or two arguments.') varname = args[0] if not isinstance(varname, str): raise InterpreterException('First argument must be a string.') try: return self.variables[varname] except KeyError: pass if len(args) == 2: return args[1] raise InterpreterException('Tried to get unknown variable "%s".' % varname) @stringArgs @noKwargs def func_is_variable(self, node, args, kwargs): if len(args) != 1: raise InvalidCode('Is_variable takes two arguments.') varname = args[0] return varname in self.variables @staticmethod def machine_from_native_kwarg(kwargs: Dict[str, Any]) -> MachineChoice: native = kwargs.get('native', False) if not isinstance(native, bool): raise InvalidArguments('Argument to "native" must be a boolean.') return MachineChoice.BUILD if native else MachineChoice.HOST @FeatureNew('is_disabler', '0.52.0') @noKwargs def func_is_disabler(self, node, args, kwargs): if len(args) != 1: raise InvalidCode('Is_disabler takes one argument.') varname = args[0] return isinstance(varname, Disabler)
[]
[]
[]
[]
[]
python
0
0
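The interpreter code above hinges on exactly one of the 'configuration', 'command', or 'copy' keyword arguments being present in configure_file(). A minimal standalone sketch of that validation idea, assuming an illustrative helper name that is not part of Meson's API:

def pick_action(kwargs):
    # Exactly one of these mutually exclusive actions must be given.
    actions = {'configuration', 'command', 'copy'} & set(kwargs)
    if not actions:
        raise ValueError("specify one of 'configuration', 'command' or 'copy'")
    if len(actions) > 1:
        raise ValueError('mutually exclusive keywords given: ' + ', '.join(sorted(actions)))
    return actions.pop()

print(pick_action({'command': ['touch', 'out.txt'], 'output': 'out.txt'}))  # -> command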
models/models.go
package models import ( "os" "github.com/astaxie/beego" "github.com/astaxie/beego/orm" "github.com/bnhf/go-openvpn/server/config" passlib "gopkg.in/hlandau/passlib.v1" ) var GlobalCfg Settings func init() { initDB() createDefaultUsers() createDefaultSettings() createDefaultOVConfig() } func initDB() { orm.RegisterDriver("sqlite3", orm.DRSqlite) dbSource := "file:" + beego.AppConfig.String("dbPath") err := orm.RegisterDataBase("default", "sqlite3", dbSource) if err != nil { panic(err) } orm.Debug = true orm.RegisterModel( new(User), new(Settings), new(OVConfig), ) // Database alias. name := "default" // Drop table and re-create. force := false // Print log. verbose := true err = orm.RunSyncdb(name, force, verbose) if err != nil { beego.Error(err) return } } func createDefaultUsers() { hash, err := passlib.Hash("b3secure") if err != nil { beego.Error("Unable to hash password", err) } user := User{ Id: 1, Login: "admin", Name: "Administrator", Email: "root@localhost", Password: hash, } o := orm.NewOrm() if created, _, err := o.ReadOrCreate(&user, "Name"); err == nil { if created { beego.Info("Default admin account created") } else { beego.Debug(user) } } } func createDefaultSettings() { s := Settings{ Profile: "default", MIAddress: "openvpn:2080", MINetwork: "tcp", ServerAddress: "127.0.0.1", OVConfigPath: "/etc/openvpn/", } o := orm.NewOrm() if created, _, err := o.ReadOrCreate(&s, "Profile"); err == nil { GlobalCfg = s if created { beego.Info("New settings profile created") } else { beego.Debug(s) } } else { beego.Error(err) } } func createDefaultOVConfig() { c := OVConfig{ Profile: "default", Config: config.Config{ Dev: "tap0", Port: 1194, Proto: "udp", DNSServerOne: "8.8.8.8", DNSServerTwo: "8.8.4.4", Cipher: "AES-256-GCM", Auth: "SHA256", Dh: "none", Keepalive: "10 120", IfconfigPoolPersist: "ipp.txt", Management: "0.0.0.0 2080", CCEncryption: "easy-rsa/pki/ta.key", Server: "server-bridge 192.168.1.100 255.255.255.0 192.168.1.2 192.168.1.8", Ca: "easy-rsa/pki/ca.crt", Cert: "easy-rsa/pki/issued/" + os.Getenv("PIVPN_SERVER") + ".crt", Key: "easy-rsa/pki/private/" + os.Getenv("PIVPN_SERVER") + ".key", ExtraServerOptions: "push \"route 0.0.0.0 255.255.255.255 net_gateway\"\nclient-to-client\n# push block-outside-dns\n# push \"redirect-gateway def1\"\n# client-config-dir /etc/openvpn/ccd\n# duplicate-cn\nmax-clients 100\n", ExtraClientOptions: "dev tap\n# dev tun\n# lport 0", PiVPNServer: os.Getenv("PIVPN_SERVER"), }, } o := orm.NewOrm() if created, _, err := o.ReadOrCreate(&c, "Profile"); err == nil { if created { beego.Info("New settings profile created") } else { beego.Debug(c) } path := GlobalCfg.OVConfigPath + "/server.conf" if _, err = os.Stat(path); os.IsNotExist(err) { destPath := GlobalCfg.OVConfigPath + "/server.conf" if err = config.SaveToFile("conf/openvpn-server-config.tpl", c.Config, destPath); err != nil { beego.Error(err) } } } else { beego.Error(err) } }
[ "\"PIVPN_SERVER\"", "\"PIVPN_SERVER\"", "\"PIVPN_SERVER\"" ]
[]
[ "PIVPN_SERVER" ]
[]
["PIVPN_SERVER"]
go
1
0
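models.go above reads PIVPN_SERVER three times to fill in the default OpenVPN cert/key paths and the PiVPNServer field; when the variable is unset, Go's os.Getenv returns an empty string and the paths degenerate to 'easy-rsa/pki/issued/.crt'. A small Python sketch of that dependency; the explicit check is an illustration, not part of the project:

import os

# Mirror the path construction from createDefaultOVConfig(); an empty PIVPN_SERVER
# would produce "easy-rsa/pki/issued/.crt", so insist on it being set first.
server = os.environ.get("PIVPN_SERVER", "")
if not server:
    raise SystemExit("PIVPN_SERVER must be set before the default OVConfig is created")
print("cert:", "easy-rsa/pki/issued/%s.crt" % server)
print("key: ", "easy-rsa/pki/private/%s.key" % server)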
cmd/dfdaemon/app/root.go
/* * Copyright The Dragonfly Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package app import ( "encoding/json" "os" "os/exec" "path/filepath" "reflect" "time" "github.com/dragonflyoss/Dragonfly/dfdaemon" "github.com/dragonflyoss/Dragonfly/dfdaemon/config" "github.com/dragonflyoss/Dragonfly/dfdaemon/constant" "github.com/dragonflyoss/Dragonfly/pkg/cmd" dferr "github.com/dragonflyoss/Dragonfly/pkg/errortypes" "github.com/dragonflyoss/Dragonfly/pkg/netutils" "github.com/dragonflyoss/Dragonfly/pkg/rate" "github.com/mitchellh/mapstructure" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/spf13/viper" "gopkg.in/yaml.v2" ) const ( // DFDaemonEnvPrefix is the default environment prefix for Viper. // Both BindEnv and AutomaticEnv will use this prefix. DFDaemonEnvPrefix = "dfdaemon" ) var rootCmd = &cobra.Command{ Use: "dfdaemon", Short: "The dfdaemon is a proxy that intercepts image download requests.", Long: "The dfdaemon is a proxy between container engine and registry used for pulling images.", DisableAutoGenTag: true, // disable displaying auto generation tag in cli docs RunE: func(cmd *cobra.Command, args []string) error { if err := readConfigFile(viper.GetViper(), cmd); err != nil { return errors.Wrap(err, "read config file") } cfg, err := getConfigFromViper(cmd, viper.GetViper()) if err != nil { return errors.Wrap(err, "get config from viper") } if err := initDfdaemon(cfg); err != nil { return errors.Wrap(err, "init dfdaemon") } cfgJSON, _ := json.Marshal(cfg) logrus.Infof("using config: %s", cfgJSON) s, err := dfdaemon.NewFromConfig(*cfg) if err != nil { return errors.Wrap(err, "create dfdaemon from config") } // if stream mode, launch peer server in dfdaemon progress if cfg.StreamMode { go dfdaemon.LaunchPeerServer(*cfg) } return s.Start() }, } func init() { executable, err := exec.LookPath(os.Args[0]) exitOnError(err, "exec.LookPath") self, err := filepath.Abs(executable) exitOnError(err, "get absolute exec path") defaultDfgetPath := filepath.Join(filepath.Dir(self), "dfget") rf := rootCmd.Flags() rf.String("config", constant.DefaultConfigPath, "the path of dfdaemon's configuration file") rf.Bool("verbose", false, "verbose") rf.Int("maxprocs", 4, "the maximum number of CPUs that the dfdaemon can use") // http server config rf.String("hostIp", "127.0.0.1", "dfdaemon host ip, default: 127.0.0.1") rf.Uint("port", 65001, "dfdaemon will listen the port") rf.Uint("peerPort", 0, "peerserver will listen the port") rf.Bool("streamMode", false, "dfdaemon will run in stream mode") rf.String("certpem", "", "cert.pem file path") rf.String("keypem", "", "key.pem file path") rf.String("registry", "https://index.docker.io", "registry mirror url, which will override the registry mirror settings in the config file if presented") // dfget download config rf.String("localrepo", "", "temp output dir of dfdaemon") rf.String("workHome", filepath.Join(os.Getenv("HOME"), ".small-dragonfly"), "the work home directory of dfdaemon.") rf.String("dfpath", defaultDfgetPath, 
"dfget path") rf.Var(netutils.NetLimit(), "ratelimit", "net speed limit") rf.StringSlice("node", nil, "specify the addresses(host:port) of supernodes that will be passed to dfget.") exitOnError(bindRootFlags(viper.GetViper()), "bind root command flags") // add sub commands rootCmd.AddCommand(NewGenCACommand()) rootCmd.AddCommand(cmd.NewGenDocCommand("dfdaemon")) rootCmd.AddCommand(cmd.NewVersionCommand("dfdaemon")) rootCmd.AddCommand(cmd.NewConfigCommand("dfdaemon", getDefaultConfig)) } // bindRootFlags binds flags on rootCmd to the given viper instance. func bindRootFlags(v *viper.Viper) error { if err := v.BindPFlags(rootCmd.Flags()); err != nil { return err } if err := v.BindPFlag("registry_mirror.remote", rootCmd.Flag("registry")); err != nil { return err } v.SetEnvPrefix(DFDaemonEnvPrefix) v.AutomaticEnv() return nil } // readConfigFile reads config file into the given viper instance. If we're // reading the default configuration file and the file does not exist, nil will // be returned. func readConfigFile(v *viper.Viper, cmd *cobra.Command) error { v.SetConfigFile(v.GetString("config")) v.SetConfigType("yaml") if err := v.ReadInConfig(); err != nil { // when the default config file is not found, ignore the error if os.IsNotExist(err) && !cmd.Flag("config").Changed { return nil } return err } return nil } func exitOnError(err error, msg string) { if err != nil { logrus.Fatalf("%s: %v", msg, err) } } // Execute runs dfdaemon. func Execute() { if err := rootCmd.Execute(); err != nil { logrus.Errorf("dfdaemon failed: %v", err) if e, ok := errors.Cause(err).(*dferr.DfError); ok { os.Exit(e.Code) } else { os.Exit(1) } } } // getDefaultConfig returns the default configuration of dfdaemon func getDefaultConfig() (interface{}, error) { return getConfigFromViper(rootCmd, viper.GetViper()) } // getConfigFromViper returns dfdaemon config from the given viper instance func getConfigFromViper(cmd *cobra.Command, v *viper.Viper) (*config.Properties, error) { // override supernodes in config file if --node is specified in cli. // use default value if no supernodes is configured in config file if cmd.Flags().Lookup("node").Changed || len(v.GetStringSlice("supernodes")) == 0 { v.Set("supernodes", v.GetStringSlice("node")) } var cfg config.Properties if err := v.Unmarshal(&cfg, func(dc *mapstructure.DecoderConfig) { dc.TagName = "yaml" dc.DecodeHook = decodeWithYAML( reflect.TypeOf(config.Regexp{}), reflect.TypeOf(config.URL{}), reflect.TypeOf(config.CertPool{}), reflect.TypeOf(time.Second), reflect.TypeOf(rate.B), ) }); err != nil { return nil, errors.Wrap(err, "unmarshal yaml") } // use `{WorkHome}/dfdaemon/data/` as repo dir if localrepo is not configured. if cfg.DFRepo == "" { cfg.DFRepo = filepath.Join(cfg.WorkHome, "dfdaemon/data/") } return &cfg, cfg.Validate() } // decodeWithYAML returns a mapstructure.DecodeHookFunc to decode the given // types by unmarshalling from yaml text. func decodeWithYAML(types ...reflect.Type) mapstructure.DecodeHookFunc { return func(f, t reflect.Type, data interface{}) (interface{}, error) { for _, typ := range types { if t == typ { b, _ := yaml.Marshal(data) v := reflect.New(t) return v.Interface(), yaml.Unmarshal(b, v.Interface()) } } return data, nil } }
[ "\"HOME\"" ]
[]
[ "HOME" ]
[]
["HOME"]
go
1
0
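In root.go above, HOME only feeds the default of the workHome flag ($HOME/.small-dragonfly); the other settings flow through viper, which is bound to environment variables under the "dfdaemon" prefix via SetEnvPrefix and AutomaticEnv. A hedged sketch of what that implies for configuration; the DFDAEMON_PORT mapping follows viper's usual prefix rule and is shown as an assumption, not a documented contract:

import os

# Default work home, as computed from HOME in init().
work_home = os.path.join(os.environ.get("HOME", ""), ".small-dragonfly")
print("default workHome:", work_home)

# With SetEnvPrefix("dfdaemon") + AutomaticEnv, a flag such as --port can
# typically be overridden through an environment variable like this one.
print("assumed override:", {"DFDAEMON_PORT": "65001"})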
mindspore/_extends/parallel_compile/tbe_compiler/tbe_process.py
# Copyright 2020 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """tbe process""" import traceback import multiprocessing import subprocess import sys import os import json from mindspore import log as logger from .common import check_kernel_info, TBEException from .helper import _op_select_format, _check_supported def create_tbe_parallel_process(): """ create TBEParallelCompiler object Returns: TBEParallelCompiler """ return tbe_process def op_select_format(op_json: str): """ call op's op_select_format to get op supported format Args: op_json (str): json string of the op Returns: op supported format or exception message """ ret = "" try: kernel_info = json.loads(op_json) check_kernel_info(kernel_info) ret = _op_select_format(kernel_info) except TBEException as e: return "TBEException: " + str(e) return ret def check_supported(op_json: str): """ call op's check_supported to check supported or not Args: op_json (str): json string of the op Returns: bool: check result, true or false str: exception message when catch an Exception """ ret = "" try: kernel_info = json.loads(op_json) check_kernel_info(kernel_info) ret = _check_supported(kernel_info) except TBEException as e: return "TBEException: " + str(e) return ret def run_compiler(op_json): """ run compiler to compile op with subprocess Args: op_json (str): json string of the op Returns: result type, result. 
""" try: tbe_compiler = os.path.join(os.path.split(os.path.realpath(__file__))[0], "compiler.py") completed_object = subprocess.run([sys.executable, tbe_compiler], input=op_json, timeout=300, text=True, capture_output=True, check=True) return "Success", completed_object.stderr except subprocess.TimeoutExpired: tb = traceback.format_exc() return "TBEException", "ERROR: " + tb + "\ninput_args: " + op_json except subprocess.CalledProcessError as e: return "TBEException", "ERROR:\n" + e.stdout + "\n" + e.stderr + "\ninput_args: " + op_json class TbeProcess: """tbe process""" def __init__(self): self.__processe_num = multiprocessing.cpu_count() self.default_num = 24 self.__pool = None self.__next_task_id = 1 self.__running_tasks = [] def __del__(self): if self.__pool is not None: self.__pool.terminate() self.__pool.join() del self.__pool def init_process_num(self): """ init compile process num :return: str Success or other string info """ # max_processes_num: Set the maximum number of concurrent processes for compiler process_num = os.getenv("MS_BUILD_PROCESS_NUM") res = "Success" if process_num is None: logger.info(f"Using default compile process num {self.default_num}") elif process_num.isdigit(): if int(process_num) in range(1, 25): self.default_num = int(process_num) logger.info(f"Using custom compile process num {self.default_num}") else: res = "TBEException",\ "ERROR: [MS_BUILD_PROCESS_NUM] should be in range(1, 25), but got : " + str(process_num) elif not process_num.isdigit(): res = "TBEException", "ERROR: [MS_BUILD_PROCESS_NUM] type should be a int num, but got :" + process_num return res def exit(self): if self.__pool is not None: self.__pool.terminate() self.__pool.join() del self.__pool def start_compile_op(self, op_json): """ start compile op async. Args: op_json (str): json string of the op Returns: int, task id(>0). -1 if error """ if self.__processe_num > self.default_num: self.__processe_num = self.default_num task_id = self.__next_task_id self.__next_task_id = self.__next_task_id + 1 if self.__pool is None: self.__pool = multiprocessing.Pool(processes=self.__processe_num) task_future = self.__pool.apply_async(func=run_compiler, args=(op_json,)) self.__running_tasks.append((task_id, task_future)) return task_id def wait_one(self): """ wait until a compile task finish Returns: int, id of the finished task. -1 if error,0 if no unfinished task str, result of compile task """ ret = 0, "Success" if self.__running_tasks: task_id, task_future = self.__running_tasks.pop(0) ret_type, result = task_future.get(330) if ret_type == "Success": ret = task_id, "Success", result elif ret_type in ("Exception", "TBEException"): ret = task_id, ret_type + ":" + result, "_" else: ret = task_id, "Exception: Not support return type:" + str(ret_type), "_" return ret def reset_task_info(self): """ reset task info when task compile error """ if self.__running_tasks: self.__running_tasks.clear() tbe_process = TbeProcess()
[]
[]
[ "MS_BUILD_PROCESS_NUM" ]
[]
["MS_BUILD_PROCESS_NUM"]
python
1
0
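TbeProcess.init_process_num() above caps the compile pool through MS_BUILD_PROCESS_NUM: unset falls back to the default of 24, a digit in 1..24 is accepted, and anything else is rejected. A standalone restatement of that rule:

import os

raw = os.environ.get("MS_BUILD_PROCESS_NUM")
if raw is None:
    process_num = 24                       # default_num in TbeProcess
elif raw.isdigit() and int(raw) in range(1, 25):
    process_num = int(raw)                 # accepted custom value
else:
    raise ValueError("[MS_BUILD_PROCESS_NUM] should be in range(1, 25), got %r" % raw)
print("compile processes:", process_num)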
azuredevops/internal/acceptancetests/resource_user_entitlement_test.go
//go:build (all || resource_user_entitlement) && !exclude_resource_user_entitlement // +build all resource_user_entitlement // +build !exclude_resource_user_entitlement package acceptancetests import ( "fmt" "os" "strings" "testing" "github.com/golang/mock/gomock" "github.com/google/uuid" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/microsoft/azure-devops-go-api/azuredevops/v6/memberentitlementmanagement" "github.com/microsoft/terraform-provider-azuredevops/azuredevops/internal/acceptancetests/testutils" "github.com/microsoft/terraform-provider-azuredevops/azuredevops/internal/client" "github.com/microsoft/terraform-provider-azuredevops/azuredevops/internal/utils" ) func TestAccUserEntitlement_Create(t *testing.T) { tfNode := "azuredevops_user_entitlement.user" principalName := os.Getenv("AZDO_TEST_AAD_USER_EMAIL") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testutils.PreCheck(t, &[]string{"AZDO_TEST_AAD_USER_EMAIL"}) }, Providers: testutils.GetProviders(), CheckDestroy: checkUserEntitlementDestroyed, Steps: []resource.TestStep{ { Config: testutils.HclUserEntitlementResource(principalName), Check: resource.ComposeTestCheckFunc( checkUserEntitlementExists(principalName), resource.TestCheckResourceAttrSet(tfNode, "descriptor"), ), }, }, }) } // Given the principalName of an AzDO userEntitlement, this will return a function that will check whether // or not the userEntitlement (1) exists in the state and (2) exist in AzDO and (3) has the correct name func checkUserEntitlementExists(expectedPrincipalName string) resource.TestCheckFunc { return func(s *terraform.State) error { resource, ok := s.RootModule().Resources["azuredevops_user_entitlement.user"] if !ok { return fmt.Errorf("Did not find a UserEntitlement in the TF state") } clients := testutils.GetProvider().Meta().(*client.AggregatedClient) id, err := uuid.Parse(resource.Primary.ID) if err != nil { return fmt.Errorf("Error parsing UserEntitlement ID, got %s: %v", resource.Primary.ID, err) } userEntitlement, err := clients.MemberEntitleManagementClient.GetUserEntitlement(clients.Ctx, memberentitlementmanagement.GetUserEntitlementArgs{ UserId: &id, }) if err != nil { return fmt.Errorf("UserEntitlement with ID=%s cannot be found!. Error=%v", id, err) } if !strings.EqualFold(strings.ToLower(*userEntitlement.User.PrincipalName), strings.ToLower(expectedPrincipalName)) { return fmt.Errorf("UserEntitlement with ID=%s has PrincipalName=%s, but expected Name=%s", resource.Primary.ID, *userEntitlement.User.PrincipalName, expectedPrincipalName) } return nil } } // verifies that all projects referenced in the state are destroyed. This will be invoked // *after* terraform destroys the resource but *before* the state is wiped clean. 
func checkUserEntitlementDestroyed(s *terraform.State) error { clients := testutils.GetProvider().Meta().(*client.AggregatedClient) //verify that every users referenced in the state does not exist in AzDO for _, resource := range s.RootModule().Resources { if resource.Type != "azuredevops_user_entitlement" { continue } id, err := uuid.Parse(resource.Primary.ID) if err != nil { return fmt.Errorf("Error parsing UserEntitlement ID, got %s: %v", resource.Primary.ID, err) } userEntitlement, err := clients.MemberEntitleManagementClient.GetUserEntitlement(clients.Ctx, memberentitlementmanagement.GetUserEntitlementArgs{ UserId: &id, }) if err != nil { if utils.ResponseWasNotFound(err) { return nil } return fmt.Errorf("Bad: Get UserEntitlment : %+v", err) } if userEntitlement != nil && userEntitlement.AccessLevel != nil && string(*userEntitlement.AccessLevel.Status) != "none" { return fmt.Errorf("Status should be none : %s with readUserEntitlement error %v", string(*userEntitlement.AccessLevel.Status), err) } } return nil } type matchAddUserEntitlementArgs struct { t *testing.T x memberentitlementmanagement.AddUserEntitlementArgs } func MatchAddUserEntitlementArgs(t *testing.T, x memberentitlementmanagement.AddUserEntitlementArgs) gomock.Matcher { return &matchAddUserEntitlementArgs{t, x} } func (m *matchAddUserEntitlementArgs) Matches(x interface{}) bool { args := x.(memberentitlementmanagement.AddUserEntitlementArgs) m.t.Logf("MatchAddUserEntitlementArgs:\nVALUE: account_license_type: [%s], licensing_source: [%s], origin: [%s], origin_id: [%s], principal_name: [%s]\n REF: account_license_type: [%s], licensing_source: [%s], origin: [%s], origin_id: [%s], principal_name: [%s]\n", *args.UserEntitlement.AccessLevel.AccountLicenseType, *args.UserEntitlement.AccessLevel.LicensingSource, *args.UserEntitlement.User.Origin, *args.UserEntitlement.User.OriginId, *args.UserEntitlement.User.PrincipalName, *m.x.UserEntitlement.AccessLevel.AccountLicenseType, *m.x.UserEntitlement.AccessLevel.LicensingSource, *m.x.UserEntitlement.User.Origin, *m.x.UserEntitlement.User.OriginId, *m.x.UserEntitlement.User.PrincipalName) return *args.UserEntitlement.AccessLevel.AccountLicenseType == *m.x.UserEntitlement.AccessLevel.AccountLicenseType && *args.UserEntitlement.User.Origin == *m.x.UserEntitlement.User.Origin && *args.UserEntitlement.User.OriginId == *m.x.UserEntitlement.User.OriginId && *args.UserEntitlement.User.PrincipalName == *m.x.UserEntitlement.User.PrincipalName } func (m *matchAddUserEntitlementArgs) String() string { return fmt.Sprintf("account_license_type: [%s], licensing_source: [%s], origin: [%s], origin_id: [%s], principal_name: [%s]", *m.x.UserEntitlement.AccessLevel.AccountLicenseType, *m.x.UserEntitlement.AccessLevel.LicensingSource, *m.x.UserEntitlement.User.Origin, *m.x.UserEntitlement.User.OriginId, *m.x.UserEntitlement.User.PrincipalName) }
[ "\"AZDO_TEST_AAD_USER_EMAIL\"" ]
[]
[ "AZDO_TEST_AAD_USER_EMAIL" ]
[]
["AZDO_TEST_AAD_USER_EMAIL"]
go
1
0
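The acceptance test above refuses to run unless AZDO_TEST_AAD_USER_EMAIL is exported, and its build constraints require the 'all' (or 'resource_user_entitlement') tag. A hypothetical launcher sketch; the placeholder e-mail is an assumption, and testutils.PreCheck may demand further AZDO_* credentials not shown in this file:

import os
import subprocess

env = dict(os.environ)
env.setdefault("AZDO_TEST_AAD_USER_EMAIL", "user@example.com")  # placeholder value
cmd = ["go", "test", "-tags", "all", "-run", "TestAccUserEntitlement_Create",
       "./azuredevops/internal/acceptancetests/"]
print("would run:", " ".join(cmd))
# subprocess.run(cmd, env=env, check=True)  # uncomment to execute for real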
locallibrary/settings.py
""""Django settings for locallibrary project. Generated by 'django-admin startproject' using Django 2.1.5. For more information on this file, see https://docs.djangoproject.com/en/2.1/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/2.1/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! #SECRET_KEY = 'cg#p$g+j9tax!#a3cup@1$8obt2_+&k3q+pmu)5%asj6yjpkag' import os SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'cg#p$g+j9tax!#a3cup@1$8obt2_+&k3q+pmu)5%asj6yjpkag') # SECURITY WARNING: don't run with debug turned on in production! #DEBUG = True DEBUG = bool(os.environ.get('DJANGO_DEBUG', True)) # Set hosts to allow any app on Heroku and the local testing URL ALLOWED_HOSTS = ['.herokuapp.com','127.0.0.1'] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', # Add our new application 'catalog.apps.CatalogConfig', #This object was created for us in /catalog/apps.py ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'whitenoise.middleware.WhiteNoiseMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'locallibrary.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': ['./templates',], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'locallibrary.wsgi.application' # Database # https://docs.djangoproject.com/en/2.1/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/2.1/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Redirect to home URL after login (Default redirects to /accounts/profile/) LOGIN_REDIRECT_URL = '/' # Add to test email: EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' # Heroku: Update database configuration from $DATABASE_URL. 
import dj_database_url db_from_env = dj_database_url.config(conn_max_age=500) DATABASES['default'].update(db_from_env) # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.1/howto/static-files/ # The absolute path to the directory where collectstatic will collect static files for deployment. STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles') # The URL to use when referring to static files (where they will be served from) STATIC_URL = '/static/' # Static file serving. # http://whitenoise.evans.io/en/stable/django.html#django-middleware STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage' # Configure logging LOGGING = { 'version': 1, 'formatters': { 'json': { '()': 'pythonjsonlogger.jsonlogger.JsonFormatter' }, 'json_loggly': { 'format': '{ "loggerName":"%(name)s", "timestamp":"%(asctime)s", "fileName":"%(filename)s", "logRecordCreationTime":"%(created)f", "functionName":"%(funcName)s", "levelNo":"%(levelno)s", "lineNo":"%(lineno)d", "time":"%(msecs)d", "levelName":"%(levelname)s", "message":"%(message)s"}', 'datefmt': '' }, }, 'handlers': { 'file': { 'level': 'DEBUG', 'class': 'logging.FileHandler', 'filename': 'debug.log', 'formatter': 'json' }, 'loggly': { 'class': 'loggly.handlers.HTTPSHandler', 'level': 'DEBUG', 'formatter': 'json_loggly', 'url': 'https://logs-01.loggly.com/inputs/' + os.environ['LOGDNA_KEY_ANEWMAN'] + '/tag/python', # anewman.loggly.com #'url': 'https://logs-01.loggly.com/inputs/' + os.environ['LOGDNA_KEY_DEVSPOTLIGHT'] + '/tag/python',, # devspotlight.loggly.com }, 'syslog': { 'level': 'DEBUG', 'class': 'logging.handlers.SysLogHandler', 'facility': 'local7', 'address': ('localhost', 514), #'formatter': 'json' }, 'console': { 'level': 'DEBUG', 'class': 'logging.StreamHandler', 'formatter': 'json' }, }, 'loggers': { 'django': { 'handlers': ['file', 'loggly', 'syslog'], 'level': 'DEBUG' }, }, }
[]
[]
[ "DJANGO_DEBUG", "DJANGO_SECRET_KEY", "LOGDNA_KEY_ANEWMAN", "LOGDNA_KEY_DEVSPOTLIGHT" ]
[]
["DJANGO_DEBUG", "DJANGO_SECRET_KEY", "LOGDNA_KEY_ANEWMAN", "LOGDNA_KEY_DEVSPOTLIGHT"]
python
4
0
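One detail in the settings file above is easy to misread: DEBUG = bool(os.environ.get('DJANGO_DEBUG', True)) treats any non-empty string, including the literal 'False', as truthy, so only an empty DJANGO_DEBUG value actually disables debug mode. A two-line demonstration:

print(bool("False"))  # True  -> DJANGO_DEBUG=False still leaves DEBUG on
print(bool(""))       # False -> export DJANGO_DEBUG='' to turn debug off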
validate.py
# coding=utf-8
import ftplib
import json
import logging
import os
import uuid

from shared import config_file

# manual logging, because certbot suppresses this script's output
logging.basicConfig(filename='validation.log', level=logging.DEBUG, format='%(asctime)s %(message)s')

# load the mapping between domains and their directories on the FTP server
with open(config_file('domains.json')) as domain_file:
    DOMAINS = json.load(domain_file)

# the domain to validate, the file name and the token content are passed in by certbot via environment variables
domain = os.environ['CERTBOT_DOMAIN']
filename = os.environ['CERTBOT_TOKEN']
content = os.environ['CERTBOT_VALIDATION']

logging.debug('Domain: ' + domain)
logging.debug('Content: ' + content)
logging.debug('File name: ' + filename)

path = DOMAINS.get(domain)
if not path:
    logging.debug('No mapping found for domain. Aborting!')
    exit(1)

# load the FTP credentials
with open(config_file('ftp.json')) as ftp_file:
    ftp_cfg = json.load(ftp_file)

# connect via FTP
ftp = ftplib.FTP_TLS(ftp_cfg['server'], ftp_cfg['login'], ftp_cfg['passwort'])
root_dir = ftp.pwd()

# navigate to the path in which the challenge has to be created
ftp.cwd(root_dir + path)
try:
    ftp.cwd('.well-known/acme-challenge')
except:
    logging.debug('Creating missing .well-known/acme-challenge directory.')
    ftp.mkd('.well-known')
    ftp.cwd('.well-known')
    ftp.mkd('acme-challenge')
    ftp.cwd('acme-challenge')

# create a temporary file holding the token content
temp_filename = str(uuid.uuid4())
logging.debug('Creating temporary file {} with the token content.'.format(temp_filename))
with open(temp_filename, 'wb') as temp:
    temp.write(str.encode(content))

# upload the temporary file to the FTP server under the name dictated by certbot
with open(temp_filename, 'rb') as temp:
    ftp.storbinary('STOR %s' % filename, temp)
ftp.close()

# delete the temporary file
os.remove(temp_filename)
[]
[]
[ "CERTBOT_DOMAIN", "CERTBOT_VALIDATION", "CERTBOT_TOKEN" ]
[]
["CERTBOT_DOMAIN", "CERTBOT_VALIDATION", "CERTBOT_TOKEN"]
python
3
0
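validate.py above expects CERTBOT_DOMAIN, CERTBOT_TOKEN and CERTBOT_VALIDATION, which certbot exports when it invokes a --manual-auth-hook. A sketch of a local dry run that fakes those variables; the domain and token values are placeholders, and the script still needs domains.json and ftp.json in place:

import os
import subprocess
import sys

env = dict(os.environ,
           CERTBOT_DOMAIN="example.org",
           CERTBOT_TOKEN="test-token",
           CERTBOT_VALIDATION="test-validation-content")
print("would run:", sys.executable, "validate.py")
# subprocess.run([sys.executable, "validate.py"], env=env, check=True)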
main.go
package main import ( "expvar" "flag" "fmt" "log" "net/http" "os" "os/signal" "syscall" "github.com/Tri125/HoP/commands" "github.com/Tri125/HoP/metrics" "github.com/bwmarrin/discordgo" ) /* Set this variable with go build with the -ldflags="-X main.version=<value>" parameter. */ var version = "undefined" // Variables used for commands line parameters var ( Token string ) func init() { versionFlag := flag.Bool("v", false, "Prints current version") flag.StringVar(&Token, "t", "", "Bot Token") flag.Parse() if *versionFlag { fmt.Println(version) os.Exit(0) } } func main() { if Token == "" { var present bool Token, present = os.LookupEnv("HOP_TOKEN") if !present { log.Fatal("Token not set.") } } dg, err := discordgo.New("Bot " + Token) if err != nil { metrics.ErrorEncountered.Add(1) log.Println("error creating Discord session,", err) return } //metrics.SetServer() port := os.Getenv("PORT") if port == "" { port = "8080" } http.Handle("/metrics", expvar.Handler()) srv := &http.Server{Addr: ":" + port, Handler: nil} go func() { if err := srv.ListenAndServe(); err != nil { log.Fatal(err) } }() // Register the messageCreate func as a callback for MessageCreate events. dg.AddHandler(messageCreate) dg.AddHandler(guildJoin) dg.AddHandler(guildRemove) // Open a websocket connection to Discord and begin listening. err = dg.Open() if err != nil { metrics.ErrorEncountered.Add(1) log.Println("error opening connection,", err) return } // Wait here until CTRL-C or other term signal is received. log.Println("Bot is now running. Press CTRL-C to exit.") log.Println("Version : ", version) dg.UpdateStatus(0, "Summoning Singulo") sc := make(chan os.Signal, 1) signal.Notify(sc, syscall.SIGINT, syscall.SIGTERM, os.Interrupt, os.Kill) <-sc // Cleanly close down the Discord session. dg.Close() metrics.Close() log.Println("Server gracefully stopped.") } func guildJoin(s *discordgo.Session, c *discordgo.GuildCreate) { metrics.JoinedGuilds.Add(1) } func guildRemove(s *discordgo.Session, r *discordgo.GuildDelete) { metrics.JoinedGuilds.Add(-1) } // This function will be called (due to AddHandler above) every time a new // message is created on any channel that the autenticated bot has access to. func messageCreate(s *discordgo.Session, m *discordgo.MessageCreate) { // Ignore all messages created by bots, including himself // This isn't required in this specific example but it's a good practice. if m.Author.Bot || len(m.Content) > 100 { return } // Find the channel that the message came from. c, err := s.State.Channel(m.ChannelID) if err != nil { metrics.ErrorEncountered.Add(1) // Could not find channel. return } // Find the guild for that channel. g, err := s.State.Guild(c.GuildID) if err != nil { metrics.ErrorEncountered.Add(1) // Could not find guild. return } if m.Content == "!grant Captain Access" { s.ChannelMessageSend(m.ChannelID, "Go home, Clown.") } else { command := commands.GetCommand(m.Content) switch command := command.(type) { default: break case commands.RemoveType: command.RemoveRole(s, g, c, m.Author, m.Content) break case commands.GrantType: command.GrantRole(s, g, c, m.Author, m.Content) break case commands.JobType: command.Jobs(s, g, c, m.Author) break case commands.HelpType: command.HoP(s, m.Author) } } metrics.RequestCounter.Incr(1) }
[ "\"PORT\"" ]
[]
[ "PORT" ]
[]
["PORT"]
go
1
0
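The bot in main.go above takes its Discord token from the -t flag or, failing that, from HOP_TOKEN, and serves expvar metrics on PORT (default 8080). A sketch of launching it with both variables set; the binary name "./HoP" is assumed for illustration:

import os
import subprocess

env = dict(os.environ, HOP_TOKEN="<discord-bot-token>", PORT="9090")
print("metrics endpoint: http://localhost:%s/metrics" % env["PORT"])
# subprocess.run(["./HoP"], env=env)  # equivalent to: ./HoP -t <discord-bot-token>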
pytorch_lightning/strategies/ddp_spawn.py
# Copyright The PyTorch Lightning team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os from collections import UserList from multiprocessing.queues import SimpleQueue from typing import Any, Callable, Dict, List, NamedTuple, Optional, Union import numpy as np import torch import torch.distributed import torch.multiprocessing as mp from torch.nn import Module from torch.nn.parallel.distributed import DistributedDataParallel import pytorch_lightning as pl from pytorch_lightning.overrides import LightningDistributedModule from pytorch_lightning.overrides.distributed import prepare_for_backward from pytorch_lightning.plugins.environments.cluster_environment import ClusterEnvironment from pytorch_lightning.plugins.io.checkpoint_plugin import CheckpointIO from pytorch_lightning.plugins.precision import PrecisionPlugin from pytorch_lightning.strategies.parallel import ParallelStrategy from pytorch_lightning.trainer.states import TrainerFn, TrainerState from pytorch_lightning.utilities import _TORCH_GREATER_EQUAL_1_8 from pytorch_lightning.utilities.apply_func import apply_to_collection, move_data_to_device from pytorch_lightning.utilities.distributed import _revert_sync_batchnorm, distributed_available from pytorch_lightning.utilities.distributed import group as _group from pytorch_lightning.utilities.distributed import init_dist_connection, ReduceOp, sync_ddp_if_available from pytorch_lightning.utilities.enums import _StrategyType from pytorch_lightning.utilities.model_helpers import is_overridden from pytorch_lightning.utilities.rank_zero import rank_zero_debug, rank_zero_only, rank_zero_warn from pytorch_lightning.utilities.seed import reset_seed from pytorch_lightning.utilities.types import _PATH, STEP_OUTPUT if _TORCH_GREATER_EQUAL_1_8: from pytorch_lightning.utilities.distributed import register_ddp_comm_hook log = logging.getLogger(__name__) class DDPSpawnStrategy(ParallelStrategy): """Spawns processes using the :func:`torch.multiprocessing.spawn` method and joins processes after training finishes.""" distributed_backend = _StrategyType.DDP_SPAWN def __init__( self, accelerator: Optional["pl.accelerators.accelerator.Accelerator"] = None, parallel_devices: Optional[List[torch.device]] = None, cluster_environment: Optional[ClusterEnvironment] = None, checkpoint_io: Optional[CheckpointIO] = None, precision_plugin: Optional[PrecisionPlugin] = None, ddp_comm_state: Optional[object] = None, ddp_comm_hook: Optional[callable] = None, ddp_comm_wrapper: Optional[callable] = None, **kwargs: Any, ): super().__init__( accelerator=accelerator, parallel_devices=parallel_devices, cluster_environment=cluster_environment, checkpoint_io=checkpoint_io, precision_plugin=precision_plugin, ) self._num_nodes = 1 self.sync_batchnorm = False self._ddp_kwargs = kwargs self._ddp_comm_state = ddp_comm_state self._ddp_comm_hook = ddp_comm_hook self._ddp_comm_wrapper = ddp_comm_wrapper self._local_rank = 0 self.set_world_ranks() @property def num_nodes(self) -> int: return self._num_nodes @num_nodes.setter def 
num_nodes(self, num_nodes: int) -> None: # note that world ranks is related to num_nodes, when resetting it, need to reset world ranks self._num_nodes = num_nodes self.set_world_ranks() @property def local_rank(self) -> int: return self._local_rank @property def root_device(self): return self.parallel_devices[self.local_rank] @property def num_processes(self): return len(self.parallel_devices) if self.parallel_devices is not None else 0 @property def distributed_sampler_kwargs(self): distributed_sampler_kwargs = dict(num_replicas=(self.num_nodes * self.num_processes), rank=self.global_rank) return distributed_sampler_kwargs @property def _is_single_process_single_device(self): return True def setup(self, trainer: "pl.Trainer") -> None: os.environ["MASTER_PORT"] = str(self.cluster_environment.main_port) super().setup(trainer) # move the model to the correct device self.model_to_device() if self.sync_batchnorm: self.model = self.configure_sync_batchnorm(self.model) # skip wrapping the model if we are not fitting as no gradients need to be exchanged trainer_fn = self.lightning_module.trainer.state.fn if trainer_fn == TrainerFn.FITTING: self.configure_ddp() def _setup_model(self, model: Module) -> DistributedDataParallel: """Wraps the model into a :class:`~torch.nn.parallel.distributed.DistributedDataParallel` module.""" return DistributedDataParallel(module=model, device_ids=self.determine_ddp_device_ids(), **self._ddp_kwargs) def set_world_ranks(self, process_idx: int = 0) -> None: self._local_rank = process_idx if self.cluster_environment is None: return self.cluster_environment.set_global_rank(self.node_rank * self.num_processes + self.local_rank) self.cluster_environment.set_world_size(self.num_nodes * self.num_processes) rank_zero_only.rank = self.cluster_environment.global_rank() def get_mp_spawn_kwargs(self, trainer: Optional["pl.Trainer"] = None) -> Dict[str, Any]: return {"nprocs": self.num_processes} def spawn(self, function: Callable, *args: Any, **kwargs: Any) -> Optional[Union[Any, "_SpawnOutput"]]: """Spawn processes that run the given function. Args: function: The function to spawn processes from. *args: Optional positional arguments that will be passed to the function in addition to the process index. These arguments must be pickleable. **kwargs: Optional named arguments that will be passed to the function in addition to the process index. These arguments must be pickleable. Return: The output of the function of process 0. """ os.environ["MASTER_PORT"] = str(self.cluster_environment.main_port) context = mp.get_context("spawn") return_queue = context.SimpleQueue() mp.spawn(self._wrapped_function, args=(function, args, kwargs, return_queue), nprocs=self.num_processes) return return_queue.get() def _wrapped_function( self, process_idx: int, function: Callable, args: Any, kwargs: Any, return_queue: SimpleQueue ) -> None: self._worker_setup(process_idx) result = function(*args, **kwargs) if self.local_rank == 0: return_queue.put(move_data_to_device(result, "cpu")) def _worker_setup(self, process_idx: int): reset_seed() self.set_world_ranks(process_idx) rank_zero_only.rank = self.global_rank init_dist_connection( self.cluster_environment, self.torch_distributed_backend, self.global_rank, self.world_size ) def pre_configure_ddp(self): # if unset, default `find_unused_parameters` `True` # Many models require setting this parameter to True, as there are corner cases # when not all parameter backward hooks are fired by the autograd engine even if require_grad is set to True. 
# This flag does come with a performance hit, so it is suggested to disable in cases where it is possible. self._ddp_kwargs["find_unused_parameters"] = self._ddp_kwargs.get("find_unused_parameters", True) if not self.lightning_module.automatic_optimization and not self._ddp_kwargs.get( "find_unused_parameters", False ): # TODO: PyTorch 1.7.0 DDP introduces `self.reducer._rebuild_buckets()` breaking manual_optimization rank_zero_warn( "From PyTorch 1.7.0, Lightning `manual_optimization` needs to set `find_unused_parameters=True` to" " properly work with DDP. Using `find_unused_parameters=True`." ) self._ddp_kwargs["find_unused_parameters"] = True def _register_ddp_hooks(self) -> None: # currently, DDP communication hooks only work with NCCL backend and SPSD (single process single device) mode # https://github.com/pytorch/pytorch/blob/v1.8.0/torch/nn/parallel/distributed.py#L1080-L1084 if _TORCH_GREATER_EQUAL_1_8 and self.root_device.type == "cuda" and self._is_single_process_single_device: register_ddp_comm_hook( model=self.model, ddp_comm_state=self._ddp_comm_state, ddp_comm_hook=self._ddp_comm_hook, ddp_comm_wrapper=self._ddp_comm_wrapper, ) def configure_ddp(self) -> None: self.pre_configure_ddp() self.model = self._setup_model(LightningDistributedModule(self.model)) self._register_ddp_hooks() def determine_ddp_device_ids(self): if self.root_device.type == "cpu": return None return [self.root_device.index] def _collect_rank_zero_results(self, trainer: "pl.Trainer", results: Any) -> Optional["_SpawnOutput"]: rank_zero_debug("Finalizing the DDP spawn environment.") checkpoint_callback = trainer.checkpoint_callback best_model_path = checkpoint_callback.best_model_path if checkpoint_callback else None # requires to compute the state_dict on all processes in case Metrics are present state_dict = self.lightning_module.state_dict() if self.global_rank != 0: return # save the last weights weights_path = None if trainer.state.fn == TrainerFn.FITTING: weights_path = os.path.join(trainer.default_root_dir, ".temp.ckpt") self.checkpoint_io.save_checkpoint(state_dict, weights_path) # adds the `callback_metrics` to the queue extra = _FakeQueue() if is_overridden("add_to_queue", self.lightning_module): # TODO: Remove the if in v1.7 self.lightning_module.add_to_queue(extra) self.add_to_queue(trainer, extra) return _SpawnOutput(best_model_path, weights_path, trainer.state, results, extra) def _recover_results_in_main_process(self, spawn_output: "_SpawnOutput", trainer: "pl.Trainer") -> None: # transfer back the best path to the trainer if trainer.checkpoint_callback: trainer.checkpoint_callback.best_model_path = spawn_output.best_model_path # TODO: pass also best score # load last weights if spawn_output.weights_path is not None: ckpt = self.checkpoint_io.load_checkpoint( spawn_output.weights_path, map_location=(lambda storage, loc: storage) ) self.lightning_module.load_state_dict(ckpt) self.checkpoint_io.remove_checkpoint(spawn_output.weights_path) trainer.state = spawn_output.trainer_state # get the `callback_metrics` and set it to the trainer if is_overridden("get_from_queue", self.lightning_module): # only in case the user does not override it. 
# TODO: Remove the if in v1.7 self.lightning_module.get_from_queue(spawn_output.extra) self.get_from_queue(trainer, spawn_output.extra) def barrier(self, *args, **kwargs) -> None: if not distributed_available(): return if _TORCH_GREATER_EQUAL_1_8 and torch.distributed.get_backend() == "nccl": torch.distributed.barrier(device_ids=self.determine_ddp_device_ids()) else: torch.distributed.barrier() def broadcast(self, obj: object, src: int = 0) -> object: if not distributed_available(): return obj obj = [obj] if self.global_rank != src: obj = [None] torch.distributed.broadcast_object_list(obj, src, group=_group.WORLD) return obj[0] def model_to_device(self): if self.root_device.type == "cuda": # set the device on the spawned subprocesses torch.cuda.set_device(self.root_device) self.model.to(self.root_device) def pre_backward(self, closure_loss: torch.Tensor) -> None: """Run before precision plugin executes backward.""" if not self.lightning_module.automatic_optimization: prepare_for_backward(self.model, closure_loss) def reduce(self, tensor, group: Optional[Any] = None, reduce_op: Union[ReduceOp, str] = "mean") -> torch.Tensor: """Reduces a tensor from several distributed processes to one aggregated tensor. Args: tensor: the tensor to sync and reduce group: the process group to gather results from. Defaults to all processes (world) reduce_op: the reduction operation. Defaults to 'mean'/'avg'. Can also be a string 'sum' to calculate the sum during reduction. Return: reduced value, except when the input was not a tensor the output remains is unchanged """ if isinstance(tensor, torch.Tensor): tensor = sync_ddp_if_available(tensor, group, reduce_op=reduce_op) return tensor def training_step(self, *args, **kwargs) -> STEP_OUTPUT: with self.precision_plugin.train_step_context(): return self.model(*args, **kwargs) def validation_step(self, *args, **kwargs) -> Optional[STEP_OUTPUT]: with self.precision_plugin.val_step_context(): if isinstance(self.model, DistributedDataParallel): # used when calling `trainer.fit` return self.model(*args, **kwargs) else: # used when calling `trainer.validate` return self.lightning_module.validation_step(*args, **kwargs) def test_step(self, *args, **kwargs) -> Optional[STEP_OUTPUT]: with self.precision_plugin.test_step_context(): return self.lightning_module.test_step(*args, **kwargs) def predict_step(self, *args, **kwargs) -> STEP_OUTPUT: with self.precision_plugin.predict_step_context(): return self.lightning_module.predict_step(*args, **kwargs) def post_training_step(self): if not self.lightning_module.automatic_optimization: self.model.require_backward_grad_sync = True def add_to_queue(self, trainer: "pl.Trainer", queue: "_FakeQueue") -> None: """Appends the :attr:`trainer.callback_metrics` dictionary to the given queue. To avoid issues with memory sharing, we cast the data to numpy. Args: trainer: reference to the Trainer. queue: the instance of the queue to append the data. """ callback_metrics: dict = apply_to_collection( trainer.callback_metrics, torch.Tensor, lambda x: x.cpu().numpy() ) # send as numpy to avoid issues with memory sharing queue.put(callback_metrics) def get_from_queue(self, trainer: "pl.Trainer", queue: "_FakeQueue") -> None: """Retrieve the :attr:`trainer.callback_metrics` dictionary from the given queue. To preserve consistency, we cast back the data to ``torch.Tensor``. Args: trainer: reference to the Trainer. queue: the instance of the queue from where to get the data. 
""" # NOTE: `add_to_queue` needs to be called before callback_metrics: dict = queue.get() trainer.callback_metrics.update(apply_to_collection(callback_metrics, np.ndarray, lambda x: torch.tensor(x))) @classmethod def register_strategies(cls, strategy_registry: Dict) -> None: strategy_registry.register( "ddp_spawn_find_unused_parameters_false", cls, description="DDPSpawn Strategy with `find_unused_parameters` as False", find_unused_parameters=False, ) def teardown(self) -> None: super().teardown() if isinstance(self.model, DistributedDataParallel): self.model = self.lightning_module if self.sync_batchnorm: self.model = _revert_sync_batchnorm(self.model) if self.root_device.type == "cuda": # GPU teardown self.lightning_module.cpu() # clean up memory torch.cuda.empty_cache() class _FakeQueue(UserList): """Simulates a :class:`torch.multiprocessing.queue.SimpleQueue` interface using the Python list.""" def get(self) -> Any: return self.pop(0) def put(self, item: Any) -> None: self.append(item) def empty(self) -> bool: return len(self) == 0 class _SpawnOutput(NamedTuple): best_model_path: Optional[_PATH] weights_path: Optional[_PATH] trainer_state: TrainerState trainer_results: Any extra: _FakeQueue
[]
[]
[ "MASTER_PORT" ]
[]
["MASTER_PORT"]
python
1
0
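The ddp_spawn.py row above launches workers with torch.multiprocessing.spawn and ships rank-0 results back to the parent through a SimpleQueue (see spawn and _wrapped_function in the file content). The snippet below is a minimal, self-contained sketch of that spawn-and-collect pattern only, not the Lightning implementation; the worker function and its payload are hypothetical.

import torch.multiprocessing as mp


def _worker(rank: int, nprocs: int, return_queue) -> None:
    # Hypothetical payload; a real strategy would also set up the process group here.
    result = {"rank": rank, "world_size": nprocs}
    if rank == 0:
        # Only rank 0 reports back, mirroring _wrapped_function in the file above.
        return_queue.put(result)


if __name__ == "__main__":
    nprocs = 2
    ctx = mp.get_context("spawn")
    return_queue = ctx.SimpleQueue()
    # fn is called as fn(process_idx, *args); join=True (the default) waits for all workers.
    mp.spawn(_worker, args=(nprocs, return_queue), nprocs=nprocs)
    print(return_queue.get())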
sandbox/asgi.py
""" ASGI config for marion project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application from configurations import importer os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings") os.environ.setdefault("DJANGO_CONFIGURATION", "Development") importer.install() application = get_asgi_application()
[]
[]
[]
[]
[]
python
0
0
utils/utils.go
package utils import ( "bytes" "crypto/rand" "crypto/sha1" "crypto/sha256" "encoding/hex" "encoding/json" "errors" "fmt" "io" "io/ioutil" "net/http" "os" "os/exec" "path/filepath" "runtime" "strconv" "strings" "sync" "syscall" "time" "github.com/dotcloud/docker/dockerversion" ) type KeyValuePair struct { Key string Value string } // A common interface to access the Fatal method of // both testing.B and testing.T. type Fataler interface { Fatal(args ...interface{}) } // Go is a basic promise implementation: it wraps calls a function in a goroutine, // and returns a channel which will later return the function's return value. func Go(f func() error) chan error { ch := make(chan error, 1) go func() { ch <- f() }() return ch } // Request a given URL and return an io.Reader func Download(url string) (resp *http.Response, err error) { if resp, err = http.Get(url); err != nil { return nil, err } if resp.StatusCode >= 400 { return nil, fmt.Errorf("Got HTTP status code >= 400: %s", resp.Status) } return resp, nil } func logf(level string, format string, a ...interface{}) { // Retrieve the stack infos _, file, line, ok := runtime.Caller(2) if !ok { file = "<unknown>" line = -1 } else { file = file[strings.LastIndex(file, "/")+1:] } fmt.Fprintf(os.Stderr, fmt.Sprintf("[%s] %s:%d %s\n", level, file, line, format), a...) } // Debug function, if the debug flag is set, then display. Do nothing otherwise // If Docker is in damon mode, also send the debug info on the socket func Debugf(format string, a ...interface{}) { if os.Getenv("DEBUG") != "" { logf("debug", format, a...) } } func Errorf(format string, a ...interface{}) { logf("error", format, a...) } func Trunc(s string, maxlen int) string { if len(s) <= maxlen { return s } return s[:maxlen] } // Figure out the absolute path of our own binary (if it's still around). func SelfPath() string { path, err := exec.LookPath(os.Args[0]) if err != nil { if os.IsNotExist(err) { return "" } if execErr, ok := err.(*exec.Error); ok && os.IsNotExist(execErr.Err) { return "" } panic(err) } path, err = filepath.Abs(path) if err != nil { if os.IsNotExist(err) { return "" } panic(err) } return path } func dockerInitSha1(target string) string { f, err := os.Open(target) if err != nil { return "" } defer f.Close() h := sha1.New() _, err = io.Copy(h, f) if err != nil { return "" } return hex.EncodeToString(h.Sum(nil)) } func isValidDockerInitPath(target string, selfPath string) bool { // target and selfPath should be absolute (InitPath and SelfPath already do this) if target == "" { return false } if dockerversion.IAMSTATIC { if selfPath == "" { return false } if target == selfPath { return true } targetFileInfo, err := os.Lstat(target) if err != nil { return false } selfPathFileInfo, err := os.Lstat(selfPath) if err != nil { return false } return os.SameFile(targetFileInfo, selfPathFileInfo) } return dockerversion.INITSHA1 != "" && dockerInitSha1(target) == dockerversion.INITSHA1 } // Figure out the path of our dockerinit (which may be SelfPath()) func DockerInitPath(localCopy string) string { selfPath := SelfPath() if isValidDockerInitPath(selfPath, selfPath) { // if we're valid, don't bother checking anything else return selfPath } var possibleInits = []string{ localCopy, dockerversion.INITPATH, filepath.Join(filepath.Dir(selfPath), "dockerinit"), // FHS 3.0 Draft: "/usr/libexec includes internal binaries that are not intended to be executed directly by users or shell scripts. Applications may use a single subdirectory under /usr/libexec." 
// http://www.linuxbase.org/betaspecs/fhs/fhs.html#usrlibexec "/usr/libexec/docker/dockerinit", "/usr/local/libexec/docker/dockerinit", // FHS 2.3: "/usr/lib includes object files, libraries, and internal binaries that are not intended to be executed directly by users or shell scripts." // http://refspecs.linuxfoundation.org/FHS_2.3/fhs-2.3.html#USRLIBLIBRARIESFORPROGRAMMINGANDPA "/usr/lib/docker/dockerinit", "/usr/local/lib/docker/dockerinit", } for _, dockerInit := range possibleInits { if dockerInit == "" { continue } path, err := exec.LookPath(dockerInit) if err == nil { path, err = filepath.Abs(path) if err != nil { // LookPath already validated that this file exists and is executable (following symlinks), so how could Abs fail? panic(err) } if isValidDockerInitPath(path, selfPath) { return path } } } return "" } type NopWriter struct{} func (*NopWriter) Write(buf []byte) (int, error) { return len(buf), nil } type nopWriteCloser struct { io.Writer } func (w *nopWriteCloser) Close() error { return nil } func NopWriteCloser(w io.Writer) io.WriteCloser { return &nopWriteCloser{w} } type bufReader struct { sync.Mutex buf *bytes.Buffer reader io.Reader err error wait sync.Cond } func NewBufReader(r io.Reader) *bufReader { reader := &bufReader{ buf: &bytes.Buffer{}, reader: r, } reader.wait.L = &reader.Mutex go reader.drain() return reader } func (r *bufReader) drain() { buf := make([]byte, 1024) for { n, err := r.reader.Read(buf) r.Lock() if err != nil { r.err = err } else { r.buf.Write(buf[0:n]) } r.wait.Signal() r.Unlock() if err != nil { break } } } func (r *bufReader) Read(p []byte) (n int, err error) { r.Lock() defer r.Unlock() for { n, err = r.buf.Read(p) if n > 0 { return n, err } if r.err != nil { return 0, r.err } r.wait.Wait() } } func (r *bufReader) Close() error { closer, ok := r.reader.(io.ReadCloser) if !ok { return nil } return closer.Close() } type WriteBroadcaster struct { sync.Mutex buf *bytes.Buffer streams map[string](map[io.WriteCloser]struct{}) } func (w *WriteBroadcaster) AddWriter(writer io.WriteCloser, stream string) { w.Lock() if _, ok := w.streams[stream]; !ok { w.streams[stream] = make(map[io.WriteCloser]struct{}) } w.streams[stream][writer] = struct{}{} w.Unlock() } type JSONLog struct { Log string `json:"log,omitempty"` Stream string `json:"stream,omitempty"` Created time.Time `json:"time"` } func (jl *JSONLog) Format(format string) (string, error) { if format == "" { return jl.Log, nil } if format == "json" { m, err := json.Marshal(jl) return string(m), err } return fmt.Sprintf("[%s] %s", jl.Created.Format(format), jl.Log), nil } func WriteLog(src io.Reader, dst io.WriteCloser, format string) error { dec := json.NewDecoder(src) for { l := &JSONLog{} if err := dec.Decode(l); err == io.EOF { return nil } else if err != nil { Errorf("Error streaming logs: %s", err) return err } line, err := l.Format(format) if err != nil { return err } fmt.Fprintf(dst, "%s", line) } } type LogFormatter struct { wc io.WriteCloser timeFormat string } func (w *WriteBroadcaster) Write(p []byte) (n int, err error) { created := time.Now().UTC() w.Lock() defer w.Unlock() if writers, ok := w.streams[""]; ok { for sw := range writers { if n, err := sw.Write(p); err != nil || n != len(p) { // On error, evict the writer delete(writers, sw) } } } w.buf.Write(p) lines := []string{} for { line, err := w.buf.ReadString('\n') if err != nil { w.buf.Write([]byte(line)) break } lines = append(lines, line) } if len(lines) != 0 { for stream, writers := range w.streams { if stream == "" { continue } 
var lp []byte for _, line := range lines { b, err := json.Marshal(&JSONLog{Log: line, Stream: stream, Created: created}) if err != nil { Errorf("Error making JSON log line: %s", err) } lp = append(lp, b...) lp = append(lp, '\n') } for sw := range writers { if _, err := sw.Write(lp); err != nil { delete(writers, sw) } } } } return len(p), nil } func (w *WriteBroadcaster) CloseWriters() error { w.Lock() defer w.Unlock() for _, writers := range w.streams { for w := range writers { w.Close() } } w.streams = make(map[string](map[io.WriteCloser]struct{})) return nil } func NewWriteBroadcaster() *WriteBroadcaster { return &WriteBroadcaster{ streams: make(map[string](map[io.WriteCloser]struct{})), buf: bytes.NewBuffer(nil), } } func GetTotalUsedFds() int { if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) } else { return len(fds) } return -1 } // TruncateID returns a shorthand version of a string identifier for convenience. // A collision with other shorthands is very unlikely, but possible. // In case of a collision a lookup with TruncIndex.Get() will fail, and the caller // will need to use a langer prefix, or the full-length Id. func TruncateID(id string) string { shortLen := 12 if len(id) < shortLen { shortLen = len(id) } return id[:shortLen] } // GenerateRandomID returns an unique id func GenerateRandomID() string { for { id := make([]byte, 32) if _, err := io.ReadFull(rand.Reader, id); err != nil { panic(err) // This shouldn't happen } value := hex.EncodeToString(id) // if we try to parse the truncated for as an int and we don't have // an error then the value is all numberic and causes issues when // used as a hostname. ref #3869 if _, err := strconv.ParseInt(TruncateID(value), 10, 64); err == nil { continue } return value } } func ValidateID(id string) error { if id == "" { return fmt.Errorf("Id can't be empty") } if strings.Contains(id, ":") { return fmt.Errorf("Invalid character in id: ':'") } return nil } // Code c/c from io.Copy() modified to handle escape sequence func CopyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error) { buf := make([]byte, 32*1024) for { nr, er := src.Read(buf) if nr > 0 { // ---- Docker addition // char 16 is C-p if nr == 1 && buf[0] == 16 { nr, er = src.Read(buf) // char 17 is C-q if nr == 1 && buf[0] == 17 { if err := src.Close(); err != nil { return 0, err } return 0, nil } } // ---- End of docker nw, ew := dst.Write(buf[0:nr]) if nw > 0 { written += int64(nw) } if ew != nil { err = ew break } if nr != nw { err = io.ErrShortWrite break } } if er == io.EOF { break } if er != nil { err = er break } } return written, err } func HashData(src io.Reader) (string, error) { h := sha256.New() if _, err := io.Copy(h, src); err != nil { return "", err } return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil } type KernelVersionInfo struct { Kernel int Major int Minor int Flavor string } func (k *KernelVersionInfo) String() string { return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor) } // Compare two KernelVersionInfo struct. 
// Returns -1 if a < b, 0 if a == b, 1 it a > b func CompareKernelVersion(a, b *KernelVersionInfo) int { if a.Kernel < b.Kernel { return -1 } else if a.Kernel > b.Kernel { return 1 } if a.Major < b.Major { return -1 } else if a.Major > b.Major { return 1 } if a.Minor < b.Minor { return -1 } else if a.Minor > b.Minor { return 1 } return 0 } func GetKernelVersion() (*KernelVersionInfo, error) { var ( err error ) uts, err := uname() if err != nil { return nil, err } release := make([]byte, len(uts.Release)) i := 0 for _, c := range uts.Release { release[i] = byte(c) i++ } // Remove the \x00 from the release for Atoi to parse correctly release = release[:bytes.IndexByte(release, 0)] return ParseRelease(string(release)) } func ParseRelease(release string) (*KernelVersionInfo, error) { var ( kernel, major, minor, parsed int flavor, partial string ) // Ignore error from Sscanf to allow an empty flavor. Instead, just // make sure we got all the version numbers. parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial) if parsed < 2 { return nil, errors.New("Can't parse kernel version " + release) } // sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64 parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor) if parsed < 1 { flavor = partial } return &KernelVersionInfo{ Kernel: kernel, Major: major, Minor: minor, Flavor: flavor, }, nil } // FIXME: this is deprecated by CopyWithTar in archive.go func CopyDirectory(source, dest string) error { if output, err := exec.Command("cp", "-ra", source, dest).CombinedOutput(); err != nil { return fmt.Errorf("Error copy: %s (%s)", err, output) } return nil } type NopFlusher struct{} func (f *NopFlusher) Flush() {} type WriteFlusher struct { sync.Mutex w io.Writer flusher http.Flusher } func (wf *WriteFlusher) Write(b []byte) (n int, err error) { wf.Lock() defer wf.Unlock() n, err = wf.w.Write(b) wf.flusher.Flush() return n, err } // Flush the stream immediately. func (wf *WriteFlusher) Flush() { wf.Lock() defer wf.Unlock() wf.flusher.Flush() } func NewWriteFlusher(w io.Writer) *WriteFlusher { var flusher http.Flusher if f, ok := w.(http.Flusher); ok { flusher = f } else { flusher = &NopFlusher{} } return &WriteFlusher{w: w, flusher: flusher} } func NewHTTPRequestError(msg string, res *http.Response) error { return &JSONError{ Message: msg, Code: res.StatusCode, } } func IsURL(str string) bool { return strings.HasPrefix(str, "http://") || strings.HasPrefix(str, "https://") } func IsGIT(str string) bool { return strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "github.com/") || strings.HasPrefix(str, "[email protected]:") || (strings.HasSuffix(str, ".git") && IsURL(str)) } // CheckLocalDns looks into the /etc/resolv.conf, // it returns true if there is a local nameserver or if there is no nameserver. func CheckLocalDns(resolvConf []byte) bool { for _, line := range GetLines(resolvConf, []byte("#")) { if !bytes.Contains(line, []byte("nameserver")) { continue } for _, ip := range [][]byte{ []byte("127.0.0.1"), []byte("127.0.1.1"), } { if bytes.Contains(line, ip) { return true } } return false } return true } // GetLines parses input into lines and strips away comments. 
func GetLines(input []byte, commentMarker []byte) [][]byte { lines := bytes.Split(input, []byte("\n")) var output [][]byte for _, currentLine := range lines { var commentIndex = bytes.Index(currentLine, commentMarker) if commentIndex == -1 { output = append(output, currentLine) } else { output = append(output, currentLine[:commentIndex]) } } return output } // FIXME: Change this not to receive default value as parameter func ParseHost(defaultHost string, defaultUnix, addr string) (string, error) { var ( proto string host string port int ) addr = strings.TrimSpace(addr) switch { case addr == "tcp://": return "", fmt.Errorf("Invalid bind address format: %s", addr) case strings.HasPrefix(addr, "unix://"): proto = "unix" addr = strings.TrimPrefix(addr, "unix://") if addr == "" { addr = defaultUnix } case strings.HasPrefix(addr, "tcp://"): proto = "tcp" addr = strings.TrimPrefix(addr, "tcp://") case strings.HasPrefix(addr, "fd://"): return addr, nil case addr == "": proto = "unix" addr = defaultUnix default: if strings.Contains(addr, "://") { return "", fmt.Errorf("Invalid bind address protocol: %s", addr) } proto = "tcp" } if proto != "unix" && strings.Contains(addr, ":") { hostParts := strings.Split(addr, ":") if len(hostParts) != 2 { return "", fmt.Errorf("Invalid bind address format: %s", addr) } if hostParts[0] != "" { host = hostParts[0] } else { host = defaultHost } if p, err := strconv.Atoi(hostParts[1]); err == nil && p != 0 { port = p } else { return "", fmt.Errorf("Invalid bind address format: %s", addr) } } else if proto == "tcp" && !strings.Contains(addr, ":") { return "", fmt.Errorf("Invalid bind address format: %s", addr) } else { host = addr } if proto == "unix" { return fmt.Sprintf("%s://%s", proto, host), nil } return fmt.Sprintf("%s://%s:%d", proto, host, port), nil } // Get a repos name and returns the right reposName + tag // The tag can be confusing because of a port in a repository name. // Ex: localhost.localdomain:5000/samalba/hipache:latest func ParseRepositoryTag(repos string) (string, string) { n := strings.LastIndex(repos, ":") if n < 0 { return repos, "" } if tag := repos[n+1:]; !strings.Contains(tag, "/") { return repos[:n], tag } return repos, "" } // An StatusError reports an unsuccessful exit by a command. type StatusError struct { Status string StatusCode int } func (e *StatusError) Error() string { return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode) } func quote(word string, buf *bytes.Buffer) { // Bail out early for "simple" strings if word != "" && !strings.ContainsAny(word, "\\'\"`${[|&;<>()~*?! \t\n") { buf.WriteString(word) return } buf.WriteString("'") for i := 0; i < len(word); i++ { b := word[i] if b == '\'' { // Replace literal ' with a close ', a \', and a open ' buf.WriteString("'\\''") } else { buf.WriteByte(b) } } buf.WriteString("'") } // Take a list of strings and escape them so they will be handled right // when passed as arguments to an program via a shell func ShellQuoteArguments(args []string) string { var buf bytes.Buffer for i, arg := range args { if i != 0 { buf.WriteByte(' ') } quote(arg, &buf) } return buf.String() } func PartParser(template, data string) (map[string]string, error) { // ip:public:private var ( templateParts = strings.Split(template, ":") parts = strings.Split(data, ":") out = make(map[string]string, len(templateParts)) ) if len(parts) != len(templateParts) { return nil, fmt.Errorf("Invalid format to parse. 
%s should match template %s", data, template) } for i, t := range templateParts { value := "" if len(parts) > i { value = parts[i] } out[t] = value } return out, nil } var globalTestID string // TestDirectory creates a new temporary directory and returns its path. // The contents of directory at path `templateDir` is copied into the // new directory. func TestDirectory(templateDir string) (dir string, err error) { if globalTestID == "" { globalTestID = RandomString()[:4] } prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, GetCallerName(2)) if prefix == "" { prefix = "docker-test-" } dir, err = ioutil.TempDir("", prefix) if err = os.Remove(dir); err != nil { return } if templateDir != "" { if err = CopyDirectory(templateDir, dir); err != nil { return } } return } // GetCallerName introspects the call stack and returns the name of the // function `depth` levels down in the stack. func GetCallerName(depth int) string { // Use the caller function name as a prefix. // This helps trace temp directories back to their test. pc, _, _, _ := runtime.Caller(depth + 1) callerLongName := runtime.FuncForPC(pc).Name() parts := strings.Split(callerLongName, ".") callerShortName := parts[len(parts)-1] return callerShortName } func CopyFile(src, dst string) (int64, error) { if src == dst { return 0, nil } sf, err := os.Open(src) if err != nil { return 0, err } defer sf.Close() if err := os.Remove(dst); err != nil && !os.IsNotExist(err) { return 0, err } df, err := os.Create(dst) if err != nil { return 0, err } defer df.Close() return io.Copy(df, sf) } type readCloserWrapper struct { io.Reader closer func() error } func (r *readCloserWrapper) Close() error { return r.closer() } func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { return &readCloserWrapper{ Reader: r, closer: closer, } } // ReplaceOrAppendValues returns the defaults with the overrides either // replaced by env key or appended to the list func ReplaceOrAppendEnvValues(defaults, overrides []string) []string { cache := make(map[string]int, len(defaults)) for i, e := range defaults { parts := strings.SplitN(e, "=", 2) cache[parts[0]] = i } for _, value := range overrides { parts := strings.SplitN(value, "=", 2) if i, exists := cache[parts[0]]; exists { defaults[i] = value } else { defaults = append(defaults, value) } } return defaults } // ReadSymlinkedDirectory returns the target directory of a symlink. // The target of the symbolic link may not be a file. func ReadSymlinkedDirectory(path string) (string, error) { var realPath string var err error if realPath, err = filepath.Abs(path); err != nil { return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err) } if realPath, err = filepath.EvalSymlinks(realPath); err != nil { return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err) } realPathInfo, err := os.Stat(realPath) if err != nil { return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err) } if !realPathInfo.Mode().IsDir() { return "", fmt.Errorf("canonical path points to a file '%s'", realPath) } return realPath, nil } func ParseKeyValueOpt(opt string) (string, string, error) { parts := strings.SplitN(opt, "=", 2) if len(parts) != 2 { return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt) } return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil } // TreeSize walks a directory tree and returns its total size in bytes. 
func TreeSize(dir string) (size int64, err error) { data := make(map[uint64]struct{}) err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, e error) error { // Ignore directory sizes if fileInfo == nil { return nil } s := fileInfo.Size() if fileInfo.IsDir() || s == 0 { return nil } // Check inode to handle hard links correctly inode := fileInfo.Sys().(*syscall.Stat_t).Ino // inode is not a uint64 on all platforms. Cast it to avoid issues. if _, exists := data[uint64(inode)]; exists { return nil } // inode is not a uint64 on all platforms. Cast it to avoid issues. data[uint64(inode)] = struct{}{} size += s return nil }) return } // ValidateContextDirectory checks if all the contents of the directory // can be read and returns an error if some files can't be read // symlinks which point to non-existing files don't trigger an error func ValidateContextDirectory(srcPath string) error { var finalError error filepath.Walk(filepath.Join(srcPath, "."), func(filePath string, f os.FileInfo, err error) error { // skip this directory/file if it's not in the path, it won't get added to the context _, err = filepath.Rel(srcPath, filePath) if err != nil && os.IsPermission(err) { return nil } if _, err := os.Stat(filePath); err != nil && os.IsPermission(err) { finalError = fmt.Errorf("can't stat '%s'", filePath) return err } // skip checking if symlinks point to non-existing files, such symlinks can be useful lstat, _ := os.Lstat(filePath) if lstat.Mode()&os.ModeSymlink == os.ModeSymlink { return err } if !f.IsDir() { currentFile, err := os.Open(filePath) if err != nil && os.IsPermission(err) { finalError = fmt.Errorf("no permission to read from '%s'", filePath) return err } else { currentFile.Close() } } return nil }) return finalError }
[ "\"DEBUG\"" ]
[]
[ "DEBUG" ]
[]
["DEBUG"]
go
1
0
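The utils.go row above gates its Debugf output on a non-empty DEBUG environment variable, which is the variable recorded in this row's metadata. Purely as an illustration of the same pattern in Python (function name and log prefix are my own), a minimal sketch could be:

import os
import sys


def debugf(fmt: str, *args) -> None:
    # Print only when DEBUG is set to a non-empty value, like Debugf in utils.go.
    if os.environ.get("DEBUG"):
        print("[debug] " + (fmt % args), file=sys.stderr)


debugf("read %d bytes from %s", 1024, "/proc/self/fd")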
api/line.go
package api import ( "errors" "net/http" "net/url" "os" "strings" ) const URL = "https://notify-api.line.me/api/notify" func Notify(msg string) error { accessToken := os.Getenv("LINE_NOTIFY_TOKEN") if accessToken == "" { return errors.New("not found LINE_NOTIFY_TOKEN") } u, err := url.ParseRequestURI(URL) if err != nil { return err } c := &http.Client{} form := url.Values{} form.Add("message", msg) body := strings.NewReader(form.Encode()) req, err := http.NewRequest("POST", u.String(), body) if err != nil { return err } req.Header.Set("Content-Type", "application/x-www-form-urlencoded") req.Header.Set("Authorization", "Bearer "+accessToken) _, err = c.Do(req) if err != nil { return err } return err }
[ "\"LINE_NOTIFY_TOKEN\"" ]
[]
[ "LINE_NOTIFY_TOKEN" ]
[]
["LINE_NOTIFY_TOKEN"]
go
1
0
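api/line.go posts a form-encoded message to the LINE Notify endpoint with a bearer token read from LINE_NOTIFY_TOKEN. As an illustration only, the same call from Python with the standard library might look like this; the endpoint, form field, and header names are copied from the file above, while the notify helper itself is an assumption, not part of that project.

import os
import urllib.parse
import urllib.request

URL = "https://notify-api.line.me/api/notify"


def notify(msg: str) -> int:
    token = os.environ.get("LINE_NOTIFY_TOKEN")
    if not token:
        raise RuntimeError("LINE_NOTIFY_TOKEN is not set")
    data = urllib.parse.urlencode({"message": msg}).encode()
    req = urllib.request.Request(URL, data=data, method="POST")
    req.add_header("Content-Type", "application/x-www-form-urlencoded")
    req.add_header("Authorization", "Bearer " + token)
    with urllib.request.urlopen(req) as resp:
        return resp.status  # 200 on success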
rpctest/utils.go
// Copyright (c) 2016 The btcsuite developers // Copyright (c) 2017-2019 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package rpctest import ( "context" "reflect" "runtime" "syscall" "testing" "time" dcrdtypes "github.com/decred/dcrd/rpc/jsonrpc/types/v3" "github.com/decred/dcrd/rpcclient/v7" ) // JoinType is an enum representing a particular type of "node join". A node // join is a synchronization tool used to wait until a subset of nodes have a // consistent state with respect to an attribute. type JoinType uint8 const ( // Blocks is a JoinType which waits until all nodes share the same // block height. Blocks JoinType = iota // Mempools is a JoinType which blocks until all nodes have identical // mempool. Mempools ) // JoinNodes is a synchronization tool used to block until all passed nodes are // fully synced with respect to an attribute. This function will block for a // period of time, finally returning once all nodes are synced according to the // passed JoinType. This function be used to ensure all active test // harnesses are at a consistent state before proceeding to an assertion or // check within rpc tests. func JoinNodes(nodes []*Harness, joinType JoinType) error { switch joinType { case Blocks: return syncBlocks(nodes) case Mempools: return syncMempools(nodes) } return nil } // syncMempools blocks until all nodes have identical mempools. func syncMempools(nodes []*Harness) error { ctx := context.Background() poolsMatch := false for !poolsMatch { retry: firstPool, err := nodes[0].Node.GetRawMempool(ctx, dcrdtypes.GRMAll) if err != nil { return err } // If all nodes have an identical mempool with respect to the // first node, then we're done. Otherwise, drop back to the top // of the loop and retry after a short wait period. for _, node := range nodes[1:] { nodePool, err := node.Node.GetRawMempool(ctx, dcrdtypes.GRMAll) if err != nil { return err } if !reflect.DeepEqual(firstPool, nodePool) { time.Sleep(time.Millisecond * 100) goto retry } } poolsMatch = true } return nil } // syncBlocks blocks until all nodes report the same block height. func syncBlocks(nodes []*Harness) error { blocksMatch := false ctx := context.Background() for !blocksMatch { retry: blockHeights := make(map[int64]struct{}) for _, node := range nodes { blockHeight, err := node.Node.GetBlockCount(ctx) if err != nil { return err } blockHeights[blockHeight] = struct{}{} if len(blockHeights) > 1 { time.Sleep(time.Millisecond * 100) goto retry } } blocksMatch = true } return nil } // ConnectNode establishes a new peer-to-peer connection between the "from" // harness and the "to" harness. The connection made is flagged as persistent, // therefore in the case of disconnects, "from" will attempt to reestablish a // connection to the "to" harness. func ConnectNode(from *Harness, to *Harness) error { tracef(from.t, "ConnectNode start") defer tracef(from.t, "ConnectNode end") ctx := context.Background() peerInfo, err := from.Node.GetPeerInfo(ctx) if err != nil { return err } numPeers := len(peerInfo) tracef(from.t, "ConnectNode numPeers: %v", numPeers) targetAddr := to.node.config.listen if err := from.Node.AddNode(ctx, targetAddr, rpcclient.ANAdd); err != nil { return err } tracef(from.t, "ConnectNode targetAddr: %v", targetAddr) // Block until a new connection has been established. 
peerInfo, err = from.Node.GetPeerInfo(ctx) if err != nil { return err } tracef(from.t, "ConnectNode peerInfo: %v", peerInfo) for len(peerInfo) <= numPeers { peerInfo, err = from.Node.GetPeerInfo(ctx) if err != nil { return err } } tracef(from.t, "ConnectNode len(peerInfo): %v", len(peerInfo)) return nil } // RemoveNode removes the peer-to-peer connection between the "from" harness and // the "to" harness. The connection is only removed in this direction, therefore // if the reverse connection exists, the nodes may still be connected. // // This function returns an error if the nodes were not previously connected. func RemoveNode(ctx context.Context, from *Harness, to *Harness) error { targetAddr := to.node.config.listen if err := from.Node.AddNode(ctx, targetAddr, rpcclient.ANRemove); err != nil { // AddNode(..., ANRemove) returns an error if the peer is not found return err } // Block until this particular connection has been dropped. for { peerInfo, err := from.Node.GetPeerInfo(ctx) if err != nil { return err } for _, p := range peerInfo { if p.Addr == targetAddr { // Nodes still connected. Skip and re-fetch the list of nodes. continue } } // If this point is reached, then the nodes are not connected anymore. break } return nil } // NodesConnected verifies whether there is a connection via the p2p interface // between the specified nodes. If allowReverse is true, connectivity is also // checked in the reverse direction (to->from). func NodesConnected(ctx context.Context, from, to *Harness, allowReverse bool) (bool, error) { peerInfo, err := from.Node.GetPeerInfo(ctx) if err != nil { return false, err } targetAddr := to.node.config.listen for _, p := range peerInfo { if p.Addr == targetAddr { return true, nil } } if !allowReverse { return false, nil } // Check in the reverse direction. peerInfo, err = to.Node.GetPeerInfo(ctx) if err != nil { return false, err } targetAddr = from.node.config.listen for _, p := range peerInfo { if p.Addr == targetAddr { return true, nil } } return false, nil } // TearDownAll tears down all active test harnesses. // XXX harness.TearDown() can hang with mutex held. func TearDownAll() error { harnessStateMtx.Lock() defer harnessStateMtx.Unlock() for _, harness := range testInstances { if err := harness.TearDown(); err != nil { return err } } return nil } // ActiveHarnesses returns a slice of all currently active test harnesses. A // test harness if considered "active" if it has been created, but not yet torn // down. // XXX this is dumb because whatever happens after this call is racing over the // Harness pointers. func ActiveHarnesses() []*Harness { harnessStateMtx.RLock() defer harnessStateMtx.RUnlock() activeNodes := make([]*Harness, 0, len(testInstances)) for _, harness := range testInstances { activeNodes = append(activeNodes, harness) } return activeNodes } // PanicAll tears down all active test harnesses. // XXX We ignore the mutex because it is *hopefully* locked when this is // called. func PanicAll(t *testing.T) { if runtime.GOOS == "windows" { t.Logf("sigabort not supported") return } for _, harness := range testInstances { // This is a little wonky but works. t.Logf("========================================================") t.Logf("Aborting: %v", harness.node.pid) err := harness.node.cmd.Process.Signal(syscall.SIGABRT) if err != nil { t.Logf("abort: %v", err) } // Allows for process to dump time.Sleep(2 * time.Second) } }
[]
[]
[]
[]
[]
go
null
null
manage.py
#!/usr/bin/env python """Django's command-line utility for administrative tasks.""" import os import sys def main(): """Run administrative tasks.""" os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'deploy_ml.settings') try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc execute_from_command_line(sys.argv) if __name__ == '__main__': main()
[]
[]
[]
[]
[]
python
0
0
gpMgmt/bin/gppylib/operations/test/unit/test_unit_deletesystem.py
#!/usr/bin/env python3 import unittest from gppylib.operations.deletesystem import validate_pgport from mock import patch, MagicMock, Mock class GpDeletesystemTestCase(unittest.TestCase): @patch('os.getenv', return_value=None) @patch('gppylib.operations.deletesystem.get_coordinatorport', return_value=12345) def test_validate_pgport_with_no_pgport_env(self, mock1, mock2): self.assertEqual(12345, validate_pgport('/foo')) @patch('os.getenv', return_value='2345') @patch('gppylib.operations.deletesystem.get_coordinatorport', return_value=12345) def test_validate_pgport_with_non_matching_pgport(self, mock1, mock2): coordinator_data_dir = '/foo' with self.assertRaisesRegex(Exception, 'PGPORT value in %s/postgresql.conf does not match PGPORT environment variable' % coordinator_data_dir): validate_pgport(coordinator_data_dir) @patch('os.getenv', return_value='12345') @patch('gppylib.operations.deletesystem.get_coordinatorport', return_value=12345) def test_validate_pgport_with_matching_pgport(self, mock1, mock2): self.assertEqual(12345, validate_pgport('/foo'))
[]
[]
[]
[]
[]
python
0
0
src/net/http/h2_bundle.go
// Code generated by golang.org/x/tools/cmd/bundle. //go:generate bundle -o h2_bundle.go -prefix http2 -underscore golang.org/x/net/http2 // Package http2 implements the HTTP/2 protocol. // // This package is low-level and intended to be used directly by very // few people. Most users will use it indirectly through the automatic // use by the net/http package (from Go 1.6 and later). // For use in earlier Go versions see ConfigureServer. (Transport support // requires Go 1.6 or later) // // See https://http2.github.io/ for more information on HTTP/2. // // See https://http2.golang.org/ for a test server running this code. // package http import ( "bufio" "bytes" "compress/gzip" "context" "crypto/tls" "encoding/binary" "errors" "fmt" "io" "io/ioutil" "log" "math" "net" "net/http/httptrace" "net/textproto" "net/url" "os" "reflect" "runtime" "sort" "strconv" "strings" "sync" "time" "golang_org/x/net/http2/hpack" "golang_org/x/net/idna" "golang_org/x/net/lex/httplex" ) // ClientConnPool manages a pool of HTTP/2 client connections. type http2ClientConnPool interface { GetClientConn(req *Request, addr string) (*http2ClientConn, error) MarkDead(*http2ClientConn) } // clientConnPoolIdleCloser is the interface implemented by ClientConnPool // implementations which can close their idle connections. type http2clientConnPoolIdleCloser interface { http2ClientConnPool closeIdleConnections() } var ( _ http2clientConnPoolIdleCloser = (*http2clientConnPool)(nil) _ http2clientConnPoolIdleCloser = http2noDialClientConnPool{} ) // TODO: use singleflight for dialing and addConnCalls? type http2clientConnPool struct { t *http2Transport mu sync.Mutex // TODO: maybe switch to RWMutex // TODO: add support for sharing conns based on cert names // (e.g. share conn for googleapis.com and appspot.com) conns map[string][]*http2ClientConn // key is host:port dialing map[string]*http2dialCall // currently in-flight dials keys map[*http2ClientConn][]string addConnCalls map[string]*http2addConnCall // in-flight addConnIfNeede calls } func (p *http2clientConnPool) GetClientConn(req *Request, addr string) (*http2ClientConn, error) { return p.getClientConn(req, addr, http2dialOnMiss) } const ( http2dialOnMiss = true http2noDialOnMiss = false ) func (p *http2clientConnPool) getClientConn(req *Request, addr string, dialOnMiss bool) (*http2ClientConn, error) { if http2isConnectionCloseRequest(req) && dialOnMiss { // It gets its own connection. const singleUse = true cc, err := p.t.dialClientConn(addr, singleUse) if err != nil { return nil, err } return cc, nil } p.mu.Lock() for _, cc := range p.conns[addr] { if cc.CanTakeNewRequest() { p.mu.Unlock() return cc, nil } } if !dialOnMiss { p.mu.Unlock() return nil, http2ErrNoCachedConn } call := p.getStartDialLocked(addr) p.mu.Unlock() <-call.done return call.res, call.err } // dialCall is an in-flight Transport dial call to a host. type http2dialCall struct { p *http2clientConnPool done chan struct{} // closed when done res *http2ClientConn // valid after done is closed err error // valid after done is closed } // requires p.mu is held. func (p *http2clientConnPool) getStartDialLocked(addr string) *http2dialCall { if call, ok := p.dialing[addr]; ok { return call } call := &http2dialCall{p: p, done: make(chan struct{})} if p.dialing == nil { p.dialing = make(map[string]*http2dialCall) } p.dialing[addr] = call go call.dial(addr) return call } // run in its own goroutine. 
func (c *http2dialCall) dial(addr string) { const singleUse = false // shared conn c.res, c.err = c.p.t.dialClientConn(addr, singleUse) close(c.done) c.p.mu.Lock() delete(c.p.dialing, addr) if c.err == nil { c.p.addConnLocked(addr, c.res) } c.p.mu.Unlock() } // addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't // already exist. It coalesces concurrent calls with the same key. // This is used by the http1 Transport code when it creates a new connection. Because // the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know // the protocol), it can get into a situation where it has multiple TLS connections. // This code decides which ones live or die. // The return value used is whether c was used. // c is never closed. func (p *http2clientConnPool) addConnIfNeeded(key string, t *http2Transport, c *tls.Conn) (used bool, err error) { p.mu.Lock() for _, cc := range p.conns[key] { if cc.CanTakeNewRequest() { p.mu.Unlock() return false, nil } } call, dup := p.addConnCalls[key] if !dup { if p.addConnCalls == nil { p.addConnCalls = make(map[string]*http2addConnCall) } call = &http2addConnCall{ p: p, done: make(chan struct{}), } p.addConnCalls[key] = call go call.run(t, key, c) } p.mu.Unlock() <-call.done if call.err != nil { return false, call.err } return !dup, nil } type http2addConnCall struct { p *http2clientConnPool done chan struct{} // closed when done err error } func (c *http2addConnCall) run(t *http2Transport, key string, tc *tls.Conn) { cc, err := t.NewClientConn(tc) p := c.p p.mu.Lock() if err != nil { c.err = err } else { p.addConnLocked(key, cc) } delete(p.addConnCalls, key) p.mu.Unlock() close(c.done) } func (p *http2clientConnPool) addConn(key string, cc *http2ClientConn) { p.mu.Lock() p.addConnLocked(key, cc) p.mu.Unlock() } // p.mu must be held func (p *http2clientConnPool) addConnLocked(key string, cc *http2ClientConn) { for _, v := range p.conns[key] { if v == cc { return } } if p.conns == nil { p.conns = make(map[string][]*http2ClientConn) } if p.keys == nil { p.keys = make(map[*http2ClientConn][]string) } p.conns[key] = append(p.conns[key], cc) p.keys[cc] = append(p.keys[cc], key) } func (p *http2clientConnPool) MarkDead(cc *http2ClientConn) { p.mu.Lock() defer p.mu.Unlock() for _, key := range p.keys[cc] { vv, ok := p.conns[key] if !ok { continue } newList := http2filterOutClientConn(vv, cc) if len(newList) > 0 { p.conns[key] = newList } else { delete(p.conns, key) } } delete(p.keys, cc) } func (p *http2clientConnPool) closeIdleConnections() { p.mu.Lock() defer p.mu.Unlock() for _, vv := range p.conns { for _, cc := range vv { cc.closeIfIdle() } } } func http2filterOutClientConn(in []*http2ClientConn, exclude *http2ClientConn) []*http2ClientConn { out := in[:0] for _, v := range in { if v != exclude { out = append(out, v) } } if len(in) != len(out) { in[len(in)-1] = nil } return out } // noDialClientConnPool is an implementation of http2.ClientConnPool // which never dials. We let the HTTP/1.1 client dial and use its TLS // connection instead. 
type http2noDialClientConnPool struct{ *http2clientConnPool } func (p http2noDialClientConnPool) GetClientConn(req *Request, addr string) (*http2ClientConn, error) { return p.getClientConn(req, addr, http2noDialOnMiss) } func http2configureTransport(t1 *Transport) (*http2Transport, error) { connPool := new(http2clientConnPool) t2 := &http2Transport{ ConnPool: http2noDialClientConnPool{connPool}, t1: t1, } connPool.t = t2 if err := http2registerHTTPSProtocol(t1, http2noDialH2RoundTripper{t2}); err != nil { return nil, err } if t1.TLSClientConfig == nil { t1.TLSClientConfig = new(tls.Config) } if !http2strSliceContains(t1.TLSClientConfig.NextProtos, "h2") { t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...) } if !http2strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") { t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1") } upgradeFn := func(authority string, c *tls.Conn) RoundTripper { addr := http2authorityAddr("https", authority) if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil { go c.Close() return http2erringRoundTripper{err} } else if !used { go c.Close() } return t2 } if m := t1.TLSNextProto; len(m) == 0 { t1.TLSNextProto = map[string]func(string, *tls.Conn) RoundTripper{ "h2": upgradeFn, } } else { m["h2"] = upgradeFn } return t2, nil } // registerHTTPSProtocol calls Transport.RegisterProtocol but // convering panics into errors. func http2registerHTTPSProtocol(t *Transport, rt RoundTripper) (err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("%v", e) } }() t.RegisterProtocol("https", rt) return nil } // noDialH2RoundTripper is a RoundTripper which only tries to complete the request // if there's already has a cached connection to the host. type http2noDialH2RoundTripper struct{ t *http2Transport } func (rt http2noDialH2RoundTripper) RoundTrip(req *Request) (*Response, error) { res, err := rt.t.RoundTrip(req) if err == http2ErrNoCachedConn { return nil, ErrSkipAltProtocol } return res, err } // An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec. 
type http2ErrCode uint32 const ( http2ErrCodeNo http2ErrCode = 0x0 http2ErrCodeProtocol http2ErrCode = 0x1 http2ErrCodeInternal http2ErrCode = 0x2 http2ErrCodeFlowControl http2ErrCode = 0x3 http2ErrCodeSettingsTimeout http2ErrCode = 0x4 http2ErrCodeStreamClosed http2ErrCode = 0x5 http2ErrCodeFrameSize http2ErrCode = 0x6 http2ErrCodeRefusedStream http2ErrCode = 0x7 http2ErrCodeCancel http2ErrCode = 0x8 http2ErrCodeCompression http2ErrCode = 0x9 http2ErrCodeConnect http2ErrCode = 0xa http2ErrCodeEnhanceYourCalm http2ErrCode = 0xb http2ErrCodeInadequateSecurity http2ErrCode = 0xc http2ErrCodeHTTP11Required http2ErrCode = 0xd ) var http2errCodeName = map[http2ErrCode]string{ http2ErrCodeNo: "NO_ERROR", http2ErrCodeProtocol: "PROTOCOL_ERROR", http2ErrCodeInternal: "INTERNAL_ERROR", http2ErrCodeFlowControl: "FLOW_CONTROL_ERROR", http2ErrCodeSettingsTimeout: "SETTINGS_TIMEOUT", http2ErrCodeStreamClosed: "STREAM_CLOSED", http2ErrCodeFrameSize: "FRAME_SIZE_ERROR", http2ErrCodeRefusedStream: "REFUSED_STREAM", http2ErrCodeCancel: "CANCEL", http2ErrCodeCompression: "COMPRESSION_ERROR", http2ErrCodeConnect: "CONNECT_ERROR", http2ErrCodeEnhanceYourCalm: "ENHANCE_YOUR_CALM", http2ErrCodeInadequateSecurity: "INADEQUATE_SECURITY", http2ErrCodeHTTP11Required: "HTTP_1_1_REQUIRED", } func (e http2ErrCode) String() string { if s, ok := http2errCodeName[e]; ok { return s } return fmt.Sprintf("unknown error code 0x%x", uint32(e)) } // ConnectionError is an error that results in the termination of the // entire connection. type http2ConnectionError http2ErrCode func (e http2ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", http2ErrCode(e)) } // StreamError is an error that only affects one stream within an // HTTP/2 connection. type http2StreamError struct { StreamID uint32 Code http2ErrCode Cause error // optional additional detail } func http2streamError(id uint32, code http2ErrCode) http2StreamError { return http2StreamError{StreamID: id, Code: code} } func (e http2StreamError) Error() string { if e.Cause != nil { return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause) } return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code) } // 6.9.1 The Flow Control Window // "If a sender receives a WINDOW_UPDATE that causes a flow control // window to exceed this maximum it MUST terminate either the stream // or the connection, as appropriate. For streams, [...]; for the // connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code." type http2goAwayFlowError struct{} func (http2goAwayFlowError) Error() string { return "connection exceeded flow control window size" } // Errors of this type are only returned by the frame parser functions // and converted into ConnectionError(ErrCodeProtocol). 
type http2connError struct { Code http2ErrCode Reason string } func (e http2connError) Error() string { return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason) } type http2pseudoHeaderError string func (e http2pseudoHeaderError) Error() string { return fmt.Sprintf("invalid pseudo-header %q", string(e)) } type http2duplicatePseudoHeaderError string func (e http2duplicatePseudoHeaderError) Error() string { return fmt.Sprintf("duplicate pseudo-header %q", string(e)) } type http2headerFieldNameError string func (e http2headerFieldNameError) Error() string { return fmt.Sprintf("invalid header field name %q", string(e)) } type http2headerFieldValueError string func (e http2headerFieldValueError) Error() string { return fmt.Sprintf("invalid header field value %q", string(e)) } var ( http2errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers") http2errPseudoAfterRegular = errors.New("pseudo header field after regular") ) // fixedBuffer is an io.ReadWriter backed by a fixed size buffer. // It never allocates, but moves old data as new data is written. type http2fixedBuffer struct { buf []byte r, w int } var ( http2errReadEmpty = errors.New("read from empty fixedBuffer") http2errWriteFull = errors.New("write on full fixedBuffer") ) // Read copies bytes from the buffer into p. // It is an error to read when no data is available. func (b *http2fixedBuffer) Read(p []byte) (n int, err error) { if b.r == b.w { return 0, http2errReadEmpty } n = copy(p, b.buf[b.r:b.w]) b.r += n if b.r == b.w { b.r = 0 b.w = 0 } return n, nil } // Len returns the number of bytes of the unread portion of the buffer. func (b *http2fixedBuffer) Len() int { return b.w - b.r } // Write copies bytes from p into the buffer. // It is an error to write more data than the buffer can hold. func (b *http2fixedBuffer) Write(p []byte) (n int, err error) { if b.r > 0 && len(p) > len(b.buf)-b.w { copy(b.buf, b.buf[b.r:b.w]) b.w -= b.r b.r = 0 } n = copy(b.buf[b.w:], p) b.w += n if n < len(p) { err = http2errWriteFull } return n, err } // flow is the flow control window's size. type http2flow struct { // n is the number of DATA bytes we're allowed to send. // A flow is kept both on a conn and a per-stream. n int32 // conn points to the shared connection-level flow that is // shared by all streams on that conn. It is nil for the flow // that's on the conn directly. conn *http2flow } func (f *http2flow) setConnFlow(cf *http2flow) { f.conn = cf } func (f *http2flow) available() int32 { n := f.n if f.conn != nil && f.conn.n < n { n = f.conn.n } return n } func (f *http2flow) take(n int32) { if n > f.available() { panic("internal error: took too much") } f.n -= n if f.conn != nil { f.conn.n -= n } } // add adds n bytes (positive or negative) to the flow control window. // It returns false if the sum would exceed 2^31-1. 
func (f *http2flow) add(n int32) bool { remain := (1<<31 - 1) - f.n if n > remain { return false } f.n += n return true } const http2frameHeaderLen = 9 var http2padZeros = make([]byte, 255) // zeros for padding // A FrameType is a registered frame type as defined in // http://http2.github.io/http2-spec/#rfc.section.11.2 type http2FrameType uint8 const ( http2FrameData http2FrameType = 0x0 http2FrameHeaders http2FrameType = 0x1 http2FramePriority http2FrameType = 0x2 http2FrameRSTStream http2FrameType = 0x3 http2FrameSettings http2FrameType = 0x4 http2FramePushPromise http2FrameType = 0x5 http2FramePing http2FrameType = 0x6 http2FrameGoAway http2FrameType = 0x7 http2FrameWindowUpdate http2FrameType = 0x8 http2FrameContinuation http2FrameType = 0x9 ) var http2frameName = map[http2FrameType]string{ http2FrameData: "DATA", http2FrameHeaders: "HEADERS", http2FramePriority: "PRIORITY", http2FrameRSTStream: "RST_STREAM", http2FrameSettings: "SETTINGS", http2FramePushPromise: "PUSH_PROMISE", http2FramePing: "PING", http2FrameGoAway: "GOAWAY", http2FrameWindowUpdate: "WINDOW_UPDATE", http2FrameContinuation: "CONTINUATION", } func (t http2FrameType) String() string { if s, ok := http2frameName[t]; ok { return s } return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t)) } // Flags is a bitmask of HTTP/2 flags. // The meaning of flags varies depending on the frame type. type http2Flags uint8 // Has reports whether f contains all (0 or more) flags in v. func (f http2Flags) Has(v http2Flags) bool { return (f & v) == v } // Frame-specific FrameHeader flag bits. const ( // Data Frame http2FlagDataEndStream http2Flags = 0x1 http2FlagDataPadded http2Flags = 0x8 // Headers Frame http2FlagHeadersEndStream http2Flags = 0x1 http2FlagHeadersEndHeaders http2Flags = 0x4 http2FlagHeadersPadded http2Flags = 0x8 http2FlagHeadersPriority http2Flags = 0x20 // Settings Frame http2FlagSettingsAck http2Flags = 0x1 // Ping Frame http2FlagPingAck http2Flags = 0x1 // Continuation Frame http2FlagContinuationEndHeaders http2Flags = 0x4 http2FlagPushPromiseEndHeaders http2Flags = 0x4 http2FlagPushPromisePadded http2Flags = 0x8 ) var http2flagName = map[http2FrameType]map[http2Flags]string{ http2FrameData: { http2FlagDataEndStream: "END_STREAM", http2FlagDataPadded: "PADDED", }, http2FrameHeaders: { http2FlagHeadersEndStream: "END_STREAM", http2FlagHeadersEndHeaders: "END_HEADERS", http2FlagHeadersPadded: "PADDED", http2FlagHeadersPriority: "PRIORITY", }, http2FrameSettings: { http2FlagSettingsAck: "ACK", }, http2FramePing: { http2FlagPingAck: "ACK", }, http2FrameContinuation: { http2FlagContinuationEndHeaders: "END_HEADERS", }, http2FramePushPromise: { http2FlagPushPromiseEndHeaders: "END_HEADERS", http2FlagPushPromisePadded: "PADDED", }, } // a frameParser parses a frame given its FrameHeader and payload // bytes. The length of payload will always equal fh.Length (which // might be 0). 
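// Illustrative sketch (not part of the original bundle): composing a
// flag bitmask and testing it with Flags.Has. END_STREAM and PADDED
// may be set together on a DATA frame; Has requires all queried bits
// to be present. http2exampleFlagCheck is a hypothetical name.
func http2exampleFlagCheck() bool {
	flags := http2FlagDataEndStream | http2FlagDataPadded
	return flags.Has(http2FlagDataEndStream) && // true: bit is set
		!flags.Has(http2FlagDataEndStream|0x40) // false: 0x40 is not set
}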
type http2frameParser func(fh http2FrameHeader, payload []byte) (http2Frame, error) var http2frameParsers = map[http2FrameType]http2frameParser{ http2FrameData: http2parseDataFrame, http2FrameHeaders: http2parseHeadersFrame, http2FramePriority: http2parsePriorityFrame, http2FrameRSTStream: http2parseRSTStreamFrame, http2FrameSettings: http2parseSettingsFrame, http2FramePushPromise: http2parsePushPromise, http2FramePing: http2parsePingFrame, http2FrameGoAway: http2parseGoAwayFrame, http2FrameWindowUpdate: http2parseWindowUpdateFrame, http2FrameContinuation: http2parseContinuationFrame, } func http2typeFrameParser(t http2FrameType) http2frameParser { if f := http2frameParsers[t]; f != nil { return f } return http2parseUnknownFrame } // A FrameHeader is the 9 byte header of all HTTP/2 frames. // // See http://http2.github.io/http2-spec/#FrameHeader type http2FrameHeader struct { valid bool // caller can access []byte fields in the Frame // Type is the 1 byte frame type. There are ten standard frame // types, but extension frame types may be written by WriteRawFrame // and will be returned by ReadFrame (as UnknownFrame). Type http2FrameType // Flags are the 1 byte of 8 potential bit flags per frame. // They are specific to the frame type. Flags http2Flags // Length is the length of the frame, not including the 9 byte header. // The maximum size is one byte less than 16MB (uint24), but only // frames up to 16KB are allowed without peer agreement. Length uint32 // StreamID is which stream this frame is for. Certain frames // are not stream-specific, in which case this field is 0. StreamID uint32 } // Header returns h. It exists so FrameHeaders can be embedded in other // specific frame types and implement the Frame interface. func (h http2FrameHeader) Header() http2FrameHeader { return h } func (h http2FrameHeader) String() string { var buf bytes.Buffer buf.WriteString("[FrameHeader ") h.writeDebug(&buf) buf.WriteByte(']') return buf.String() } func (h http2FrameHeader) writeDebug(buf *bytes.Buffer) { buf.WriteString(h.Type.String()) if h.Flags != 0 { buf.WriteString(" flags=") set := 0 for i := uint8(0); i < 8; i++ { if h.Flags&(1<<i) == 0 { continue } set++ if set > 1 { buf.WriteByte('|') } name := http2flagName[h.Type][http2Flags(1<<i)] if name != "" { buf.WriteString(name) } else { fmt.Fprintf(buf, "0x%x", 1<<i) } } } if h.StreamID != 0 { fmt.Fprintf(buf, " stream=%d", h.StreamID) } fmt.Fprintf(buf, " len=%d", h.Length) } func (h *http2FrameHeader) checkValid() { if !h.valid { panic("Frame accessor called on non-owned Frame") } } func (h *http2FrameHeader) invalidate() { h.valid = false } // frame header bytes. // Used only by ReadFrameHeader. var http2fhBytes = sync.Pool{ New: func() interface{} { buf := make([]byte, http2frameHeaderLen) return &buf }, } // ReadFrameHeader reads 9 bytes from r and returns a FrameHeader. // Most users should use Framer.ReadFrame instead. 
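// Illustrative sketch (not part of the original bundle): the 9-byte
// frame header wire layout that readFrameHeader below decodes.
// Offsets 0-2 hold a 24-bit big-endian length, offset 3 the type,
// offset 4 the flags, and offsets 5-8 the stream ID with the reserved
// high bit masked off. http2exampleDecodeHeader is a hypothetical name.
func http2exampleDecodeHeader(buf [9]byte) http2FrameHeader {
	return http2FrameHeader{
		Length:   uint32(buf[0])<<16 | uint32(buf[1])<<8 | uint32(buf[2]),
		Type:     http2FrameType(buf[3]),
		Flags:    http2Flags(buf[4]),
		StreamID: binary.BigEndian.Uint32(buf[5:]) & (1<<31 - 1),
		valid:    true,
	}
}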
func http2ReadFrameHeader(r io.Reader) (http2FrameHeader, error) { bufp := http2fhBytes.Get().(*[]byte) defer http2fhBytes.Put(bufp) return http2readFrameHeader(*bufp, r) } func http2readFrameHeader(buf []byte, r io.Reader) (http2FrameHeader, error) { _, err := io.ReadFull(r, buf[:http2frameHeaderLen]) if err != nil { return http2FrameHeader{}, err } return http2FrameHeader{ Length: (uint32(buf[0])<<16 | uint32(buf[1])<<8 | uint32(buf[2])), Type: http2FrameType(buf[3]), Flags: http2Flags(buf[4]), StreamID: binary.BigEndian.Uint32(buf[5:]) & (1<<31 - 1), valid: true, }, nil } // A Frame is the base interface implemented by all frame types. // Callers will generally type-assert the specific frame type: // *HeadersFrame, *SettingsFrame, *WindowUpdateFrame, etc. // // Frames are only valid until the next call to Framer.ReadFrame. type http2Frame interface { Header() http2FrameHeader // invalidate is called by Framer.ReadFrame to make this // frame's buffers as being invalid, since the subsequent // frame will reuse them. invalidate() } // A Framer reads and writes Frames. type http2Framer struct { r io.Reader lastFrame http2Frame errDetail error // lastHeaderStream is non-zero if the last frame was an // unfinished HEADERS/CONTINUATION. lastHeaderStream uint32 maxReadSize uint32 headerBuf [http2frameHeaderLen]byte // TODO: let getReadBuf be configurable, and use a less memory-pinning // allocator in server.go to minimize memory pinned for many idle conns. // Will probably also need to make frame invalidation have a hook too. getReadBuf func(size uint32) []byte readBuf []byte // cache for default getReadBuf maxWriteSize uint32 // zero means unlimited; TODO: implement w io.Writer wbuf []byte // AllowIllegalWrites permits the Framer's Write methods to // write frames that do not conform to the HTTP/2 spec. This // permits using the Framer to test other HTTP/2 // implementations' conformance to the spec. // If false, the Write methods will prefer to return an error // rather than comply. AllowIllegalWrites bool // AllowIllegalReads permits the Framer's ReadFrame method // to return non-compliant frames or frame orders. // This is for testing and permits using the Framer to test // other HTTP/2 implementations' conformance to the spec. // It is not compatible with ReadMetaHeaders. AllowIllegalReads bool // ReadMetaHeaders if non-nil causes ReadFrame to merge // HEADERS and CONTINUATION frames together and return // MetaHeadersFrame instead. ReadMetaHeaders *hpack.Decoder // MaxHeaderListSize is the http2 MAX_HEADER_LIST_SIZE. // It's used only if ReadMetaHeaders is set; 0 means a sane default // (currently 16MB) // If the limit is hit, MetaHeadersFrame.Truncated is set true. 
MaxHeaderListSize uint32 logReads bool debugFramer *http2Framer // only use for logging written writes debugFramerBuf *bytes.Buffer } func (fr *http2Framer) maxHeaderListSize() uint32 { if fr.MaxHeaderListSize == 0 { return 16 << 20 } return fr.MaxHeaderListSize } func (f *http2Framer) startWrite(ftype http2FrameType, flags http2Flags, streamID uint32) { f.wbuf = append(f.wbuf[:0], 0, 0, 0, byte(ftype), byte(flags), byte(streamID>>24), byte(streamID>>16), byte(streamID>>8), byte(streamID)) } func (f *http2Framer) endWrite() error { length := len(f.wbuf) - http2frameHeaderLen if length >= (1 << 24) { return http2ErrFrameTooLarge } _ = append(f.wbuf[:0], byte(length>>16), byte(length>>8), byte(length)) if http2logFrameWrites { f.logWrite() } n, err := f.w.Write(f.wbuf) if err == nil && n != len(f.wbuf) { err = io.ErrShortWrite } return err } func (f *http2Framer) logWrite() { if f.debugFramer == nil { f.debugFramerBuf = new(bytes.Buffer) f.debugFramer = http2NewFramer(nil, f.debugFramerBuf) f.debugFramer.logReads = false f.debugFramer.AllowIllegalReads = true } f.debugFramerBuf.Write(f.wbuf) fr, err := f.debugFramer.ReadFrame() if err != nil { log.Printf("http2: Framer %p: failed to decode just-written frame", f) return } log.Printf("http2: Framer %p: wrote %v", f, http2summarizeFrame(fr)) } func (f *http2Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) } func (f *http2Framer) writeBytes(v []byte) { f.wbuf = append(f.wbuf, v...) } func (f *http2Framer) writeUint16(v uint16) { f.wbuf = append(f.wbuf, byte(v>>8), byte(v)) } func (f *http2Framer) writeUint32(v uint32) { f.wbuf = append(f.wbuf, byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) } const ( http2minMaxFrameSize = 1 << 14 http2maxFrameSize = 1<<24 - 1 ) // NewFramer returns a Framer that writes frames to w and reads them from r. func http2NewFramer(w io.Writer, r io.Reader) *http2Framer { fr := &http2Framer{ w: w, r: r, logReads: http2logFrameReads, } fr.getReadBuf = func(size uint32) []byte { if cap(fr.readBuf) >= int(size) { return fr.readBuf[:size] } fr.readBuf = make([]byte, size) return fr.readBuf } fr.SetMaxReadFrameSize(http2maxFrameSize) return fr } // SetMaxReadFrameSize sets the maximum size of a frame // that will be read by a subsequent call to ReadFrame. // It is the caller's responsibility to advertise this // limit with a SETTINGS frame. func (fr *http2Framer) SetMaxReadFrameSize(v uint32) { if v > http2maxFrameSize { v = http2maxFrameSize } fr.maxReadSize = v } // ErrorDetail returns a more detailed error of the last error // returned by Framer.ReadFrame. For instance, if ReadFrame // returns a StreamError with code PROTOCOL_ERROR, ErrorDetail // will say exactly what was invalid. ErrorDetail is not guaranteed // to return a non-nil value and like the rest of the http2 package, // its return value is not protected by an API compatibility promise. // ErrorDetail is reset after the next call to ReadFrame. func (fr *http2Framer) ErrorDetail() error { return fr.errDetail } // ErrFrameTooLarge is returned from Framer.ReadFrame when the peer // sends a frame that is larger than declared with SetMaxReadFrameSize. var http2ErrFrameTooLarge = errors.New("http2: frame too large") // terminalReadFrameError reports whether err is an unrecoverable // error from ReadFrame and no other frames should be read. func http2terminalReadFrameError(err error) bool { if _, ok := err.(http2StreamError); ok { return false } return err != nil } // ReadFrame reads a single frame. 
The returned Frame is only valid // until the next call to ReadFrame. // // If the frame is larger than previously set with SetMaxReadFrameSize, the // returned error is ErrFrameTooLarge. Other errors may be of type // ConnectionError, StreamError, or anything else from the underlying // reader. func (fr *http2Framer) ReadFrame() (http2Frame, error) { fr.errDetail = nil if fr.lastFrame != nil { fr.lastFrame.invalidate() } fh, err := http2readFrameHeader(fr.headerBuf[:], fr.r) if err != nil { return nil, err } if fh.Length > fr.maxReadSize { return nil, http2ErrFrameTooLarge } payload := fr.getReadBuf(fh.Length) if _, err := io.ReadFull(fr.r, payload); err != nil { return nil, err } f, err := http2typeFrameParser(fh.Type)(fh, payload) if err != nil { if ce, ok := err.(http2connError); ok { return nil, fr.connError(ce.Code, ce.Reason) } return nil, err } if err := fr.checkFrameOrder(f); err != nil { return nil, err } if fr.logReads { log.Printf("http2: Framer %p: read %v", fr, http2summarizeFrame(f)) } if fh.Type == http2FrameHeaders && fr.ReadMetaHeaders != nil { return fr.readMetaFrame(f.(*http2HeadersFrame)) } return f, nil } // connError returns ConnectionError(code) but first // stashes away a public reason to the caller can optionally relay it // to the peer before hanging up on them. This might help others debug // their implementations. func (fr *http2Framer) connError(code http2ErrCode, reason string) error { fr.errDetail = errors.New(reason) return http2ConnectionError(code) } // checkFrameOrder reports an error if f is an invalid frame to return // next from ReadFrame. Mostly it checks whether HEADERS and // CONTINUATION frames are contiguous. func (fr *http2Framer) checkFrameOrder(f http2Frame) error { last := fr.lastFrame fr.lastFrame = f if fr.AllowIllegalReads { return nil } fh := f.Header() if fr.lastHeaderStream != 0 { if fh.Type != http2FrameContinuation { return fr.connError(http2ErrCodeProtocol, fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d", fh.Type, fh.StreamID, last.Header().Type, fr.lastHeaderStream)) } if fh.StreamID != fr.lastHeaderStream { return fr.connError(http2ErrCodeProtocol, fmt.Sprintf("got CONTINUATION for stream %d; expected stream %d", fh.StreamID, fr.lastHeaderStream)) } } else if fh.Type == http2FrameContinuation { return fr.connError(http2ErrCodeProtocol, fmt.Sprintf("unexpected CONTINUATION for stream %d", fh.StreamID)) } switch fh.Type { case http2FrameHeaders, http2FrameContinuation: if fh.Flags.Has(http2FlagHeadersEndHeaders) { fr.lastHeaderStream = 0 } else { fr.lastHeaderStream = fh.StreamID } } return nil } // A DataFrame conveys arbitrary, variable-length sequences of octets // associated with a stream. // See http://http2.github.io/http2-spec/#rfc.section.6.1 type http2DataFrame struct { http2FrameHeader data []byte } func (f *http2DataFrame) StreamEnded() bool { return f.http2FrameHeader.Flags.Has(http2FlagDataEndStream) } // Data returns the frame's data octets, not including any padding // size byte or padding suffix bytes. // The caller must not retain the returned memory past the next // call to ReadFrame. 
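// Illustrative sketch (not part of the original bundle): a minimal
// read loop over a Framer, type-switching on the concrete frame types
// returned by ReadFrame. http2exampleReadLoop is a hypothetical name;
// rw would typically be a net.Conn.
func http2exampleReadLoop(rw io.ReadWriter) error {
	fr := http2NewFramer(rw, rw)
	for {
		f, err := fr.ReadFrame()
		if err != nil {
			return err // may be ErrFrameTooLarge, a ConnectionError, etc.
		}
		switch f := f.(type) {
		case *http2SettingsFrame:
			if !f.IsAck() {
				if err := fr.WriteSettingsAck(); err != nil {
					return err
				}
			}
		case *http2DataFrame:
			log.Printf("DATA on stream %d: %d bytes", f.StreamID, len(f.Data()))
		case *http2GoAwayFrame:
			return nil
		}
	}
}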
func (f *http2DataFrame) Data() []byte { f.checkValid() return f.data } func http2parseDataFrame(fh http2FrameHeader, payload []byte) (http2Frame, error) { if fh.StreamID == 0 { return nil, http2connError{http2ErrCodeProtocol, "DATA frame with stream ID 0"} } f := &http2DataFrame{ http2FrameHeader: fh, } var padSize byte if fh.Flags.Has(http2FlagDataPadded) { var err error payload, padSize, err = http2readByte(payload) if err != nil { return nil, err } } if int(padSize) > len(payload) { return nil, http2connError{http2ErrCodeProtocol, "pad size larger than data payload"} } f.data = payload[:len(payload)-int(padSize)] return f, nil } var ( http2errStreamID = errors.New("invalid stream ID") http2errDepStreamID = errors.New("invalid dependent stream ID") http2errPadLength = errors.New("pad length too large") ) func http2validStreamIDOrZero(streamID uint32) bool { return streamID&(1<<31) == 0 } func http2validStreamID(streamID uint32) bool { return streamID != 0 && streamID&(1<<31) == 0 } // WriteData writes a DATA frame. // // It will perform exactly one Write to the underlying Writer. // It is the caller's responsibility not to violate the maximum frame size // and to not call other Write methods concurrently. func (f *http2Framer) WriteData(streamID uint32, endStream bool, data []byte) error { return f.WriteDataPadded(streamID, endStream, data, nil) } // WriteData writes a DATA frame with optional padding. // // If pad is nil, the padding bit is not sent. // The length of pad must not exceed 255 bytes. // // It will perform exactly one Write to the underlying Writer. // It is the caller's responsibility not to violate the maximum frame size // and to not call other Write methods concurrently. func (f *http2Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error { if !http2validStreamID(streamID) && !f.AllowIllegalWrites { return http2errStreamID } if len(pad) > 255 { return http2errPadLength } var flags http2Flags if endStream { flags |= http2FlagDataEndStream } if pad != nil { flags |= http2FlagDataPadded } f.startWrite(http2FrameData, flags, streamID) if pad != nil { f.wbuf = append(f.wbuf, byte(len(pad))) } f.wbuf = append(f.wbuf, data...) f.wbuf = append(f.wbuf, pad...) return f.endWrite() } // A SettingsFrame conveys configuration parameters that affect how // endpoints communicate, such as preferences and constraints on peer // behavior. // // See http://http2.github.io/http2-spec/#SETTINGS type http2SettingsFrame struct { http2FrameHeader p []byte } func http2parseSettingsFrame(fh http2FrameHeader, p []byte) (http2Frame, error) { if fh.Flags.Has(http2FlagSettingsAck) && fh.Length > 0 { return nil, http2ConnectionError(http2ErrCodeFrameSize) } if fh.StreamID != 0 { return nil, http2ConnectionError(http2ErrCodeProtocol) } if len(p)%6 != 0 { return nil, http2ConnectionError(http2ErrCodeFrameSize) } f := &http2SettingsFrame{http2FrameHeader: fh, p: p} if v, ok := f.Value(http2SettingInitialWindowSize); ok && v > (1<<31)-1 { return nil, http2ConnectionError(http2ErrCodeFlowControl) } return f, nil } func (f *http2SettingsFrame) IsAck() bool { return f.http2FrameHeader.Flags.Has(http2FlagSettingsAck) } func (f *http2SettingsFrame) Value(s http2SettingID) (v uint32, ok bool) { f.checkValid() buf := f.p for len(buf) > 0 { settingID := http2SettingID(binary.BigEndian.Uint16(buf[:2])) if settingID == s { return binary.BigEndian.Uint32(buf[2:6]), true } buf = buf[6:] } return 0, false } // ForeachSetting runs fn for each setting. // It stops and returns the first error. 
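// Illustrative sketch (not part of the original bundle): writing a
// SETTINGS frame and reading one setting back out of it, using a
// bytes.Buffer as both ends of the "connection".
// http2exampleSettingsRoundTrip is a hypothetical name.
func http2exampleSettingsRoundTrip() (uint32, bool) {
	var buf bytes.Buffer
	fr := http2NewFramer(&buf, &buf)
	if err := fr.WriteSettings(
		http2Setting{ID: http2SettingMaxFrameSize, Val: 1 << 20},
		http2Setting{ID: http2SettingInitialWindowSize, Val: 65535},
	); err != nil {
		return 0, false
	}
	f, err := fr.ReadFrame()
	if err != nil {
		return 0, false
	}
	return f.(*http2SettingsFrame).Value(http2SettingMaxFrameSize)
}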
func (f *http2SettingsFrame) ForeachSetting(fn func(http2Setting) error) error { f.checkValid() buf := f.p for len(buf) > 0 { if err := fn(http2Setting{ http2SettingID(binary.BigEndian.Uint16(buf[:2])), binary.BigEndian.Uint32(buf[2:6]), }); err != nil { return err } buf = buf[6:] } return nil } // WriteSettings writes a SETTINGS frame with zero or more settings // specified and the ACK bit not set. // // It will perform exactly one Write to the underlying Writer. // It is the caller's responsibility to not call other Write methods concurrently. func (f *http2Framer) WriteSettings(settings ...http2Setting) error { f.startWrite(http2FrameSettings, 0, 0) for _, s := range settings { f.writeUint16(uint16(s.ID)) f.writeUint32(s.Val) } return f.endWrite() } // WriteSettings writes an empty SETTINGS frame with the ACK bit set. // // It will perform exactly one Write to the underlying Writer. // It is the caller's responsibility to not call other Write methods concurrently. func (f *http2Framer) WriteSettingsAck() error { f.startWrite(http2FrameSettings, http2FlagSettingsAck, 0) return f.endWrite() } // A PingFrame is a mechanism for measuring a minimal round trip time // from the sender, as well as determining whether an idle connection // is still functional. // See http://http2.github.io/http2-spec/#rfc.section.6.7 type http2PingFrame struct { http2FrameHeader Data [8]byte } func (f *http2PingFrame) IsAck() bool { return f.Flags.Has(http2FlagPingAck) } func http2parsePingFrame(fh http2FrameHeader, payload []byte) (http2Frame, error) { if len(payload) != 8 { return nil, http2ConnectionError(http2ErrCodeFrameSize) } if fh.StreamID != 0 { return nil, http2ConnectionError(http2ErrCodeProtocol) } f := &http2PingFrame{http2FrameHeader: fh} copy(f.Data[:], payload) return f, nil } func (f *http2Framer) WritePing(ack bool, data [8]byte) error { var flags http2Flags if ack { flags = http2FlagPingAck } f.startWrite(http2FramePing, flags, 0) f.writeBytes(data[:]) return f.endWrite() } // A GoAwayFrame informs the remote peer to stop creating streams on this connection. // See http://http2.github.io/http2-spec/#rfc.section.6.8 type http2GoAwayFrame struct { http2FrameHeader LastStreamID uint32 ErrCode http2ErrCode debugData []byte } // DebugData returns any debug data in the GOAWAY frame. Its contents // are not defined. // The caller must not retain the returned memory past the next // call to ReadFrame. func (f *http2GoAwayFrame) DebugData() []byte { f.checkValid() return f.debugData } func http2parseGoAwayFrame(fh http2FrameHeader, p []byte) (http2Frame, error) { if fh.StreamID != 0 { return nil, http2ConnectionError(http2ErrCodeProtocol) } if len(p) < 8 { return nil, http2ConnectionError(http2ErrCodeFrameSize) } return &http2GoAwayFrame{ http2FrameHeader: fh, LastStreamID: binary.BigEndian.Uint32(p[:4]) & (1<<31 - 1), ErrCode: http2ErrCode(binary.BigEndian.Uint32(p[4:8])), debugData: p[8:], }, nil } func (f *http2Framer) WriteGoAway(maxStreamID uint32, code http2ErrCode, debugData []byte) error { f.startWrite(http2FrameGoAway, 0, 0) f.writeUint32(maxStreamID & (1<<31 - 1)) f.writeUint32(uint32(code)) f.writeBytes(debugData) return f.endWrite() } // An UnknownFrame is the frame type returned when the frame type is unknown // or no specific frame type parser exists. type http2UnknownFrame struct { http2FrameHeader p []byte } // Payload returns the frame's payload (after the header). 
It is not // valid to call this method after a subsequent call to // Framer.ReadFrame, nor is it valid to retain the returned slice. // The memory is owned by the Framer and is invalidated when the next // frame is read. func (f *http2UnknownFrame) Payload() []byte { f.checkValid() return f.p } func http2parseUnknownFrame(fh http2FrameHeader, p []byte) (http2Frame, error) { return &http2UnknownFrame{fh, p}, nil } // A WindowUpdateFrame is used to implement flow control. // See http://http2.github.io/http2-spec/#rfc.section.6.9 type http2WindowUpdateFrame struct { http2FrameHeader Increment uint32 // never read with high bit set } func http2parseWindowUpdateFrame(fh http2FrameHeader, p []byte) (http2Frame, error) { if len(p) != 4 { return nil, http2ConnectionError(http2ErrCodeFrameSize) } inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff if inc == 0 { if fh.StreamID == 0 { return nil, http2ConnectionError(http2ErrCodeProtocol) } return nil, http2streamError(fh.StreamID, http2ErrCodeProtocol) } return &http2WindowUpdateFrame{ http2FrameHeader: fh, Increment: inc, }, nil } // WriteWindowUpdate writes a WINDOW_UPDATE frame. // The increment value must be between 1 and 2,147,483,647, inclusive. // If the Stream ID is zero, the window update applies to the // connection as a whole. func (f *http2Framer) WriteWindowUpdate(streamID, incr uint32) error { if (incr < 1 || incr > 2147483647) && !f.AllowIllegalWrites { return errors.New("illegal window increment value") } f.startWrite(http2FrameWindowUpdate, 0, streamID) f.writeUint32(incr) return f.endWrite() } // A HeadersFrame is used to open a stream and additionally carries a // header block fragment. type http2HeadersFrame struct { http2FrameHeader // Priority is set if FlagHeadersPriority is set in the FrameHeader. Priority http2PriorityParam headerFragBuf []byte // not owned } func (f *http2HeadersFrame) HeaderBlockFragment() []byte { f.checkValid() return f.headerFragBuf } func (f *http2HeadersFrame) HeadersEnded() bool { return f.http2FrameHeader.Flags.Has(http2FlagHeadersEndHeaders) } func (f *http2HeadersFrame) StreamEnded() bool { return f.http2FrameHeader.Flags.Has(http2FlagHeadersEndStream) } func (f *http2HeadersFrame) HasPriority() bool { return f.http2FrameHeader.Flags.Has(http2FlagHeadersPriority) } func http2parseHeadersFrame(fh http2FrameHeader, p []byte) (_ http2Frame, err error) { hf := &http2HeadersFrame{ http2FrameHeader: fh, } if fh.StreamID == 0 { return nil, http2connError{http2ErrCodeProtocol, "HEADERS frame with stream ID 0"} } var padLength uint8 if fh.Flags.Has(http2FlagHeadersPadded) { if p, padLength, err = http2readByte(p); err != nil { return } } if fh.Flags.Has(http2FlagHeadersPriority) { var v uint32 p, v, err = http2readUint32(p) if err != nil { return nil, err } hf.Priority.StreamDep = v & 0x7fffffff hf.Priority.Exclusive = (v != hf.Priority.StreamDep) p, hf.Priority.Weight, err = http2readByte(p) if err != nil { return nil, err } } if len(p)-int(padLength) <= 0 { return nil, http2streamError(fh.StreamID, http2ErrCodeProtocol) } hf.headerFragBuf = p[:len(p)-int(padLength)] return hf, nil } // HeadersFrameParam are the parameters for writing a HEADERS frame. type http2HeadersFrameParam struct { // StreamID is the required Stream ID to initiate. StreamID uint32 // BlockFragment is part (or all) of a Header Block. BlockFragment []byte // EndStream indicates that the header block is the last that // the endpoint will send for the identified stream. 
Setting // this flag causes the stream to enter one of "half closed" // states. EndStream bool // EndHeaders indicates that this frame contains an entire // header block and is not followed by any // CONTINUATION frames. EndHeaders bool // PadLength is the optional number of bytes of zeros to add // to this frame. PadLength uint8 // Priority, if non-zero, includes stream priority information // in the HEADER frame. Priority http2PriorityParam } // WriteHeaders writes a single HEADERS frame. // // This is a low-level header writing method. Encoding headers and // splitting them into any necessary CONTINUATION frames is handled // elsewhere. // // It will perform exactly one Write to the underlying Writer. // It is the caller's responsibility to not call other Write methods concurrently. func (f *http2Framer) WriteHeaders(p http2HeadersFrameParam) error { if !http2validStreamID(p.StreamID) && !f.AllowIllegalWrites { return http2errStreamID } var flags http2Flags if p.PadLength != 0 { flags |= http2FlagHeadersPadded } if p.EndStream { flags |= http2FlagHeadersEndStream } if p.EndHeaders { flags |= http2FlagHeadersEndHeaders } if !p.Priority.IsZero() { flags |= http2FlagHeadersPriority } f.startWrite(http2FrameHeaders, flags, p.StreamID) if p.PadLength != 0 { f.writeByte(p.PadLength) } if !p.Priority.IsZero() { v := p.Priority.StreamDep if !http2validStreamIDOrZero(v) && !f.AllowIllegalWrites { return http2errDepStreamID } if p.Priority.Exclusive { v |= 1 << 31 } f.writeUint32(v) f.writeByte(p.Priority.Weight) } f.wbuf = append(f.wbuf, p.BlockFragment...) f.wbuf = append(f.wbuf, http2padZeros[:p.PadLength]...) return f.endWrite() } // A PriorityFrame specifies the sender-advised priority of a stream. // See http://http2.github.io/http2-spec/#rfc.section.6.3 type http2PriorityFrame struct { http2FrameHeader http2PriorityParam } // PriorityParam are the stream prioritzation parameters. type http2PriorityParam struct { // StreamDep is a 31-bit stream identifier for the // stream that this stream depends on. Zero means no // dependency. StreamDep uint32 // Exclusive is whether the dependency is exclusive. Exclusive bool // Weight is the stream's zero-indexed weight. It should be // set together with StreamDep, or neither should be set. Per // the spec, "Add one to the value to obtain a weight between // 1 and 256." Weight uint8 } func (p http2PriorityParam) IsZero() bool { return p == http2PriorityParam{} } func http2parsePriorityFrame(fh http2FrameHeader, payload []byte) (http2Frame, error) { if fh.StreamID == 0 { return nil, http2connError{http2ErrCodeProtocol, "PRIORITY frame with stream ID 0"} } if len(payload) != 5 { return nil, http2connError{http2ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))} } v := binary.BigEndian.Uint32(payload[:4]) streamID := v & 0x7fffffff return &http2PriorityFrame{ http2FrameHeader: fh, http2PriorityParam: http2PriorityParam{ Weight: payload[4], StreamDep: streamID, Exclusive: streamID != v, }, }, nil } // WritePriority writes a PRIORITY frame. // // It will perform exactly one Write to the underlying Writer. // It is the caller's responsibility to not call other Write methods concurrently. 
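// Illustrative sketch (not part of the original bundle): encoding a
// small header block with hpack and sending it as a single HEADERS
// frame via WriteHeaders. http2exampleWriteHeaders is a hypothetical
// name; in real use the block may need to be split across
// CONTINUATION frames if it exceeds the peer's max frame size.
func http2exampleWriteHeaders(fr *http2Framer) error {
	var hbuf bytes.Buffer
	enc := hpack.NewEncoder(&hbuf)
	for _, f := range []hpack.HeaderField{
		{Name: ":method", Value: "GET"},
		{Name: ":path", Value: "/"},
		{Name: ":scheme", Value: "https"},
		{Name: ":authority", Value: "example.com"},
	} {
		if err := enc.WriteField(f); err != nil {
			return err
		}
	}
	return fr.WriteHeaders(http2HeadersFrameParam{
		StreamID:      1,
		BlockFragment: hbuf.Bytes(),
		EndStream:     true,
		EndHeaders:    true,
	})
}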
func (f *http2Framer) WritePriority(streamID uint32, p http2PriorityParam) error { if !http2validStreamID(streamID) && !f.AllowIllegalWrites { return http2errStreamID } if !http2validStreamIDOrZero(p.StreamDep) { return http2errDepStreamID } f.startWrite(http2FramePriority, 0, streamID) v := p.StreamDep if p.Exclusive { v |= 1 << 31 } f.writeUint32(v) f.writeByte(p.Weight) return f.endWrite() } // A RSTStreamFrame allows for abnormal termination of a stream. // See http://http2.github.io/http2-spec/#rfc.section.6.4 type http2RSTStreamFrame struct { http2FrameHeader ErrCode http2ErrCode } func http2parseRSTStreamFrame(fh http2FrameHeader, p []byte) (http2Frame, error) { if len(p) != 4 { return nil, http2ConnectionError(http2ErrCodeFrameSize) } if fh.StreamID == 0 { return nil, http2ConnectionError(http2ErrCodeProtocol) } return &http2RSTStreamFrame{fh, http2ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil } // WriteRSTStream writes a RST_STREAM frame. // // It will perform exactly one Write to the underlying Writer. // It is the caller's responsibility to not call other Write methods concurrently. func (f *http2Framer) WriteRSTStream(streamID uint32, code http2ErrCode) error { if !http2validStreamID(streamID) && !f.AllowIllegalWrites { return http2errStreamID } f.startWrite(http2FrameRSTStream, 0, streamID) f.writeUint32(uint32(code)) return f.endWrite() } // A ContinuationFrame is used to continue a sequence of header block fragments. // See http://http2.github.io/http2-spec/#rfc.section.6.10 type http2ContinuationFrame struct { http2FrameHeader headerFragBuf []byte } func http2parseContinuationFrame(fh http2FrameHeader, p []byte) (http2Frame, error) { if fh.StreamID == 0 { return nil, http2connError{http2ErrCodeProtocol, "CONTINUATION frame with stream ID 0"} } return &http2ContinuationFrame{fh, p}, nil } func (f *http2ContinuationFrame) HeaderBlockFragment() []byte { f.checkValid() return f.headerFragBuf } func (f *http2ContinuationFrame) HeadersEnded() bool { return f.http2FrameHeader.Flags.Has(http2FlagContinuationEndHeaders) } // WriteContinuation writes a CONTINUATION frame. // // It will perform exactly one Write to the underlying Writer. // It is the caller's responsibility to not call other Write methods concurrently. func (f *http2Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) error { if !http2validStreamID(streamID) && !f.AllowIllegalWrites { return http2errStreamID } var flags http2Flags if endHeaders { flags |= http2FlagContinuationEndHeaders } f.startWrite(http2FrameContinuation, flags, streamID) f.wbuf = append(f.wbuf, headerBlockFragment...) return f.endWrite() } // A PushPromiseFrame is used to initiate a server stream. // See http://http2.github.io/http2-spec/#rfc.section.6.6 type http2PushPromiseFrame struct { http2FrameHeader PromiseID uint32 headerFragBuf []byte // not owned } func (f *http2PushPromiseFrame) HeaderBlockFragment() []byte { f.checkValid() return f.headerFragBuf } func (f *http2PushPromiseFrame) HeadersEnded() bool { return f.http2FrameHeader.Flags.Has(http2FlagPushPromiseEndHeaders) } func http2parsePushPromise(fh http2FrameHeader, p []byte) (_ http2Frame, err error) { pp := &http2PushPromiseFrame{ http2FrameHeader: fh, } if pp.StreamID == 0 { return nil, http2ConnectionError(http2ErrCodeProtocol) } // The PUSH_PROMISE frame includes optional padding. 
// Padding fields and flags are identical to those defined for DATA frames var padLength uint8 if fh.Flags.Has(http2FlagPushPromisePadded) { if p, padLength, err = http2readByte(p); err != nil { return } } p, pp.PromiseID, err = http2readUint32(p) if err != nil { return } pp.PromiseID = pp.PromiseID & (1<<31 - 1) if int(padLength) > len(p) { return nil, http2ConnectionError(http2ErrCodeProtocol) } pp.headerFragBuf = p[:len(p)-int(padLength)] return pp, nil } // PushPromiseParam are the parameters for writing a PUSH_PROMISE frame. type http2PushPromiseParam struct { // StreamID is the required Stream ID to initiate. StreamID uint32 // PromiseID is the required Stream ID which this // Push Promises PromiseID uint32 // BlockFragment is part (or all) of a Header Block. BlockFragment []byte // EndHeaders indicates that this frame contains an entire // header block and is not followed by any // CONTINUATION frames. EndHeaders bool // PadLength is the optional number of bytes of zeros to add // to this frame. PadLength uint8 } // WritePushPromise writes a single PushPromise Frame. // // As with Header Frames, This is the low level call for writing // individual frames. Continuation frames are handled elsewhere. // // It will perform exactly one Write to the underlying Writer. // It is the caller's responsibility to not call other Write methods concurrently. func (f *http2Framer) WritePushPromise(p http2PushPromiseParam) error { if !http2validStreamID(p.StreamID) && !f.AllowIllegalWrites { return http2errStreamID } var flags http2Flags if p.PadLength != 0 { flags |= http2FlagPushPromisePadded } if p.EndHeaders { flags |= http2FlagPushPromiseEndHeaders } f.startWrite(http2FramePushPromise, flags, p.StreamID) if p.PadLength != 0 { f.writeByte(p.PadLength) } if !http2validStreamID(p.PromiseID) && !f.AllowIllegalWrites { return http2errStreamID } f.writeUint32(p.PromiseID) f.wbuf = append(f.wbuf, p.BlockFragment...) f.wbuf = append(f.wbuf, http2padZeros[:p.PadLength]...) return f.endWrite() } // WriteRawFrame writes a raw frame. This can be used to write // extension frames unknown to this package. func (f *http2Framer) WriteRawFrame(t http2FrameType, flags http2Flags, streamID uint32, payload []byte) error { f.startWrite(t, flags, streamID) f.writeBytes(payload) return f.endWrite() } func http2readByte(p []byte) (remain []byte, b byte, err error) { if len(p) == 0 { return nil, 0, io.ErrUnexpectedEOF } return p[1:], p[0], nil } func http2readUint32(p []byte) (remain []byte, v uint32, err error) { if len(p) < 4 { return nil, 0, io.ErrUnexpectedEOF } return p[4:], binary.BigEndian.Uint32(p[:4]), nil } type http2streamEnder interface { StreamEnded() bool } type http2headersEnder interface { HeadersEnded() bool } type http2headersOrContinuation interface { http2headersEnder HeaderBlockFragment() []byte } // A MetaHeadersFrame is the representation of one HEADERS frame and // zero or more contiguous CONTINUATION frames and the decoding of // their HPACK-encoded contents. // // This type of frame does not appear on the wire and is only returned // by the Framer when Framer.ReadMetaHeaders is set. type http2MetaHeadersFrame struct { *http2HeadersFrame // Fields are the fields contained in the HEADERS and // CONTINUATION frames. The underlying slice is owned by the // Framer and must not be retained after the next call to // ReadFrame. // // Fields are guaranteed to be in the correct http2 order and // not have unknown pseudo header fields or invalid header // field names or values. 
Required pseudo header fields may be // missing, however. Use the MetaHeadersFrame.Pseudo accessor // method access pseudo headers. Fields []hpack.HeaderField // Truncated is whether the max header list size limit was hit // and Fields is incomplete. The hpack decoder state is still // valid, however. Truncated bool } // PseudoValue returns the given pseudo header field's value. // The provided pseudo field should not contain the leading colon. func (mh *http2MetaHeadersFrame) PseudoValue(pseudo string) string { for _, hf := range mh.Fields { if !hf.IsPseudo() { return "" } if hf.Name[1:] == pseudo { return hf.Value } } return "" } // RegularFields returns the regular (non-pseudo) header fields of mh. // The caller does not own the returned slice. func (mh *http2MetaHeadersFrame) RegularFields() []hpack.HeaderField { for i, hf := range mh.Fields { if !hf.IsPseudo() { return mh.Fields[i:] } } return nil } // PseudoFields returns the pseudo header fields of mh. // The caller does not own the returned slice. func (mh *http2MetaHeadersFrame) PseudoFields() []hpack.HeaderField { for i, hf := range mh.Fields { if !hf.IsPseudo() { return mh.Fields[:i] } } return mh.Fields } func (mh *http2MetaHeadersFrame) checkPseudos() error { var isRequest, isResponse bool pf := mh.PseudoFields() for i, hf := range pf { switch hf.Name { case ":method", ":path", ":scheme", ":authority": isRequest = true case ":status": isResponse = true default: return http2pseudoHeaderError(hf.Name) } for _, hf2 := range pf[:i] { if hf.Name == hf2.Name { return http2duplicatePseudoHeaderError(hf.Name) } } } if isRequest && isResponse { return http2errMixPseudoHeaderTypes } return nil } func (fr *http2Framer) maxHeaderStringLen() int { v := fr.maxHeaderListSize() if uint32(int(v)) == v { return int(v) } return 0 } // readMetaFrame returns 0 or more CONTINUATION frames from fr and // merge them into into the provided hf and returns a MetaHeadersFrame // with the decoded hpack values. 
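// Illustrative sketch (not part of the original bundle): enabling
// ReadMetaHeaders so that ReadFrame returns *http2MetaHeadersFrame
// values with HEADERS and CONTINUATION frames already merged and
// HPACK-decoded. http2exampleReadRequestHeaders is a hypothetical name.
func http2exampleReadRequestHeaders(fr *http2Framer) error {
	fr.ReadMetaHeaders = hpack.NewDecoder(http2initialHeaderTableSize, nil)
	f, err := fr.ReadFrame()
	if err != nil {
		return err
	}
	if mh, ok := f.(*http2MetaHeadersFrame); ok {
		// Pseudo headers are looked up without the leading colon.
		log.Printf("%s %s (truncated=%v, %d regular fields)",
			mh.PseudoValue("method"), mh.PseudoValue("path"),
			mh.Truncated, len(mh.RegularFields()))
	}
	return nil
}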
func (fr *http2Framer) readMetaFrame(hf *http2HeadersFrame) (*http2MetaHeadersFrame, error) { if fr.AllowIllegalReads { return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders") } mh := &http2MetaHeadersFrame{ http2HeadersFrame: hf, } var remainSize = fr.maxHeaderListSize() var sawRegular bool var invalid error // pseudo header field errors hdec := fr.ReadMetaHeaders hdec.SetEmitEnabled(true) hdec.SetMaxStringLength(fr.maxHeaderStringLen()) hdec.SetEmitFunc(func(hf hpack.HeaderField) { if http2VerboseLogs && http2logFrameReads { log.Printf("http2: decoded hpack field %+v", hf) } if !httplex.ValidHeaderFieldValue(hf.Value) { invalid = http2headerFieldValueError(hf.Value) } isPseudo := strings.HasPrefix(hf.Name, ":") if isPseudo { if sawRegular { invalid = http2errPseudoAfterRegular } } else { sawRegular = true if !http2validWireHeaderFieldName(hf.Name) { invalid = http2headerFieldNameError(hf.Name) } } if invalid != nil { hdec.SetEmitEnabled(false) return } size := hf.Size() if size > remainSize { hdec.SetEmitEnabled(false) mh.Truncated = true return } remainSize -= size mh.Fields = append(mh.Fields, hf) }) defer hdec.SetEmitFunc(func(hf hpack.HeaderField) {}) var hc http2headersOrContinuation = hf for { frag := hc.HeaderBlockFragment() if _, err := hdec.Write(frag); err != nil { return nil, http2ConnectionError(http2ErrCodeCompression) } if hc.HeadersEnded() { break } if f, err := fr.ReadFrame(); err != nil { return nil, err } else { hc = f.(*http2ContinuationFrame) } } mh.http2HeadersFrame.headerFragBuf = nil mh.http2HeadersFrame.invalidate() if err := hdec.Close(); err != nil { return nil, http2ConnectionError(http2ErrCodeCompression) } if invalid != nil { fr.errDetail = invalid if http2VerboseLogs { log.Printf("http2: invalid header: %v", invalid) } return nil, http2StreamError{mh.StreamID, http2ErrCodeProtocol, invalid} } if err := mh.checkPseudos(); err != nil { fr.errDetail = err if http2VerboseLogs { log.Printf("http2: invalid pseudo headers: %v", err) } return nil, http2StreamError{mh.StreamID, http2ErrCodeProtocol, err} } return mh, nil } func http2summarizeFrame(f http2Frame) string { var buf bytes.Buffer f.Header().writeDebug(&buf) switch f := f.(type) { case *http2SettingsFrame: n := 0 f.ForeachSetting(func(s http2Setting) error { n++ if n == 1 { buf.WriteString(", settings:") } fmt.Fprintf(&buf, " %v=%v,", s.ID, s.Val) return nil }) if n > 0 { buf.Truncate(buf.Len() - 1) } case *http2DataFrame: data := f.Data() const max = 256 if len(data) > max { data = data[:max] } fmt.Fprintf(&buf, " data=%q", data) if len(f.Data()) > max { fmt.Fprintf(&buf, " (%d bytes omitted)", len(f.Data())-max) } case *http2WindowUpdateFrame: if f.StreamID == 0 { buf.WriteString(" (conn)") } fmt.Fprintf(&buf, " incr=%v", f.Increment) case *http2PingFrame: fmt.Fprintf(&buf, " ping=%q", f.Data[:]) case *http2GoAwayFrame: fmt.Fprintf(&buf, " LastStreamID=%v ErrCode=%v Debug=%q", f.LastStreamID, f.ErrCode, f.debugData) case *http2RSTStreamFrame: fmt.Fprintf(&buf, " ErrCode=%v", f.ErrCode) } return buf.String() } func http2transportExpectContinueTimeout(t1 *Transport) time.Duration { return t1.ExpectContinueTimeout } // isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. 
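// Illustrative sketch (not part of the original bundle): telling
// stream-level errors apart from connection-level ones after a
// ReadFrame failure, and surfacing ErrorDetail for logging.
// http2exampleClassifyReadError is a hypothetical name.
func http2exampleClassifyReadError(fr *http2Framer, err error) {
	switch e := err.(type) {
	case http2StreamError:
		// Only the one stream is affected; a server would RST_STREAM it.
		log.Printf("stream %d failed: %v", e.StreamID, e.Code)
	case http2ConnectionError:
		// The whole connection must be torn down with GOAWAY.
		log.Printf("connection error %v: %v", http2ErrCode(e), fr.ErrorDetail())
	default:
		log.Printf("read error: %v", err)
	}
}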
func http2isBadCipher(cipher uint16) bool { switch cipher { case tls.TLS_RSA_WITH_RC4_128_SHA, tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, tls.TLS_RSA_WITH_AES_128_CBC_SHA, tls.TLS_RSA_WITH_AES_256_CBC_SHA, tls.TLS_RSA_WITH_AES_128_GCM_SHA256, tls.TLS_RSA_WITH_AES_256_GCM_SHA384, tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: return true default: return false } } type http2contextContext interface { context.Context } func http2serverConnBaseContext(c net.Conn, opts *http2ServeConnOpts) (ctx http2contextContext, cancel func()) { ctx, cancel = context.WithCancel(context.Background()) ctx = context.WithValue(ctx, LocalAddrContextKey, c.LocalAddr()) if hs := opts.baseConfig(); hs != nil { ctx = context.WithValue(ctx, ServerContextKey, hs) } return } func http2contextWithCancel(ctx http2contextContext) (_ http2contextContext, cancel func()) { return context.WithCancel(ctx) } func http2requestWithContext(req *Request, ctx http2contextContext) *Request { return req.WithContext(ctx) } type http2clientTrace httptrace.ClientTrace func http2reqContext(r *Request) context.Context { return r.Context() } func http2setResponseUncompressed(res *Response) { res.Uncompressed = true } func http2traceGotConn(req *Request, cc *http2ClientConn) { trace := httptrace.ContextClientTrace(req.Context()) if trace == nil || trace.GotConn == nil { return } ci := httptrace.GotConnInfo{Conn: cc.tconn} cc.mu.Lock() ci.Reused = cc.nextStreamID > 1 ci.WasIdle = len(cc.streams) == 0 && ci.Reused if ci.WasIdle && !cc.lastActive.IsZero() { ci.IdleTime = time.Now().Sub(cc.lastActive) } cc.mu.Unlock() trace.GotConn(ci) } func http2traceWroteHeaders(trace *http2clientTrace) { if trace != nil && trace.WroteHeaders != nil { trace.WroteHeaders() } } func http2traceGot100Continue(trace *http2clientTrace) { if trace != nil && trace.Got100Continue != nil { trace.Got100Continue() } } func http2traceWait100Continue(trace *http2clientTrace) { if trace != nil && trace.Wait100Continue != nil { trace.Wait100Continue() } } func http2traceWroteRequest(trace *http2clientTrace, err error) { if trace != nil && trace.WroteRequest != nil { trace.WroteRequest(httptrace.WroteRequestInfo{Err: err}) } } func http2traceFirstResponseByte(trace *http2clientTrace) { if trace != nil && trace.GotFirstResponseByte != nil { trace.GotFirstResponseByte() } } func http2requestTrace(req *Request) *http2clientTrace { trace := httptrace.ContextClientTrace(req.Context()) return (*http2clientTrace)(trace) } func http2cloneTLSConfig(c *tls.Config) *tls.Config { return c.Clone() } var http2DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1" type http2goroutineLock uint64 func http2newGoroutineLock() http2goroutineLock { if !http2DebugGoroutines { return 0 } return http2goroutineLock(http2curGoroutineID()) } func (g http2goroutineLock) check() { if !http2DebugGoroutines { return } if http2curGoroutineID() != uint64(g) { panic("running on the wrong goroutine") } } func (g http2goroutineLock) checkNotOn() { if !http2DebugGoroutines { return } if http2curGoroutineID() == uint64(g) { panic("running on the wrong goroutine") } } var http2goroutineSpace = []byte("goroutine ") func http2curGoroutineID() uint64 { bp := http2littleBuf.Get().(*[]byte) defer http2littleBuf.Put(bp) b := *bp b = b[:runtime.Stack(b, false)] b = bytes.TrimPrefix(b, 
http2goroutineSpace) i := bytes.IndexByte(b, ' ') if i < 0 { panic(fmt.Sprintf("No space found in %q", b)) } b = b[:i] n, err := http2parseUintBytes(b, 10, 64) if err != nil { panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err)) } return n } var http2littleBuf = sync.Pool{ New: func() interface{} { buf := make([]byte, 64) return &buf }, } // parseUintBytes is like strconv.ParseUint, but using a []byte. func http2parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) { var cutoff, maxVal uint64 if bitSize == 0 { bitSize = int(strconv.IntSize) } s0 := s switch { case len(s) < 1: err = strconv.ErrSyntax goto Error case 2 <= base && base <= 36: case base == 0: switch { case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'): base = 16 s = s[2:] if len(s) < 1 { err = strconv.ErrSyntax goto Error } case s[0] == '0': base = 8 default: base = 10 } default: err = errors.New("invalid base " + strconv.Itoa(base)) goto Error } n = 0 cutoff = http2cutoff64(base) maxVal = 1<<uint(bitSize) - 1 for i := 0; i < len(s); i++ { var v byte d := s[i] switch { case '0' <= d && d <= '9': v = d - '0' case 'a' <= d && d <= 'z': v = d - 'a' + 10 case 'A' <= d && d <= 'Z': v = d - 'A' + 10 default: n = 0 err = strconv.ErrSyntax goto Error } if int(v) >= base { n = 0 err = strconv.ErrSyntax goto Error } if n >= cutoff { n = 1<<64 - 1 err = strconv.ErrRange goto Error } n *= uint64(base) n1 := n + uint64(v) if n1 < n || n1 > maxVal { n = 1<<64 - 1 err = strconv.ErrRange goto Error } n = n1 } return n, nil Error: return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err} } // Return the first number n such that n*base >= 1<<64. func http2cutoff64(base int) uint64 { if base < 2 { return 0 } return (1<<64-1)/uint64(base) + 1 } var ( http2commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case http2commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case ) func init() { for _, v := range []string{ "accept", "accept-charset", "accept-encoding", "accept-language", "accept-ranges", "age", "access-control-allow-origin", "allow", "authorization", "cache-control", "content-disposition", "content-encoding", "content-language", "content-length", "content-location", "content-range", "content-type", "cookie", "date", "etag", "expect", "expires", "from", "host", "if-match", "if-modified-since", "if-none-match", "if-unmodified-since", "last-modified", "link", "location", "max-forwards", "proxy-authenticate", "proxy-authorization", "range", "referer", "refresh", "retry-after", "server", "set-cookie", "strict-transport-security", "trailer", "transfer-encoding", "user-agent", "vary", "via", "www-authenticate", } { chk := CanonicalHeaderKey(v) http2commonLowerHeader[chk] = v http2commonCanonHeader[v] = chk } } func http2lowerHeader(v string) string { if s, ok := http2commonLowerHeader[v]; ok { return s } return strings.ToLower(v) } var ( http2VerboseLogs bool http2logFrameWrites bool http2logFrameReads bool ) func init() { e := os.Getenv("GODEBUG") if strings.Contains(e, "http2debug=1") { http2VerboseLogs = true } if strings.Contains(e, "http2debug=2") { http2VerboseLogs = true http2logFrameWrites = true http2logFrameReads = true } } const ( // ClientPreface is the string that must be sent by new // connections from clients. 
http2ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" // SETTINGS_MAX_FRAME_SIZE default // http://http2.github.io/http2-spec/#rfc.section.6.5.2 http2initialMaxFrameSize = 16384 // NextProtoTLS is the NPN/ALPN protocol negotiated during // HTTP/2's TLS setup. http2NextProtoTLS = "h2" // http://http2.github.io/http2-spec/#SettingValues http2initialHeaderTableSize = 4096 http2initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size http2defaultMaxReadFrameSize = 1 << 20 ) var ( http2clientPreface = []byte(http2ClientPreface) ) type http2streamState int const ( http2stateIdle http2streamState = iota http2stateOpen http2stateHalfClosedLocal http2stateHalfClosedRemote http2stateResvLocal http2stateResvRemote http2stateClosed ) var http2stateName = [...]string{ http2stateIdle: "Idle", http2stateOpen: "Open", http2stateHalfClosedLocal: "HalfClosedLocal", http2stateHalfClosedRemote: "HalfClosedRemote", http2stateResvLocal: "ResvLocal", http2stateResvRemote: "ResvRemote", http2stateClosed: "Closed", } func (st http2streamState) String() string { return http2stateName[st] } // Setting is a setting parameter: which setting it is, and its value. type http2Setting struct { // ID is which setting is being set. // See http://http2.github.io/http2-spec/#SettingValues ID http2SettingID // Val is the value. Val uint32 } func (s http2Setting) String() string { return fmt.Sprintf("[%v = %d]", s.ID, s.Val) } // Valid reports whether the setting is valid. func (s http2Setting) Valid() error { switch s.ID { case http2SettingEnablePush: if s.Val != 1 && s.Val != 0 { return http2ConnectionError(http2ErrCodeProtocol) } case http2SettingInitialWindowSize: if s.Val > 1<<31-1 { return http2ConnectionError(http2ErrCodeFlowControl) } case http2SettingMaxFrameSize: if s.Val < 16384 || s.Val > 1<<24-1 { return http2ConnectionError(http2ErrCodeProtocol) } } return nil } // A SettingID is an HTTP/2 setting as defined in // http://http2.github.io/http2-spec/#iana-settings type http2SettingID uint16 const ( http2SettingHeaderTableSize http2SettingID = 0x1 http2SettingEnablePush http2SettingID = 0x2 http2SettingMaxConcurrentStreams http2SettingID = 0x3 http2SettingInitialWindowSize http2SettingID = 0x4 http2SettingMaxFrameSize http2SettingID = 0x5 http2SettingMaxHeaderListSize http2SettingID = 0x6 ) var http2settingName = map[http2SettingID]string{ http2SettingHeaderTableSize: "HEADER_TABLE_SIZE", http2SettingEnablePush: "ENABLE_PUSH", http2SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", http2SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", http2SettingMaxFrameSize: "MAX_FRAME_SIZE", http2SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", } func (s http2SettingID) String() string { if v, ok := http2settingName[s]; ok { return v } return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s)) } var ( http2errInvalidHeaderFieldName = errors.New("http2: invalid header field name") http2errInvalidHeaderFieldValue = errors.New("http2: invalid header field value") ) // validWireHeaderFieldName reports whether v is a valid header field // name (key). See httplex.ValidHeaderName for the base rules. // // Further, http2 says: // "Just as in HTTP/1.x, header field names are strings of ASCII // characters that are compared in a case-insensitive // fashion. However, header field names MUST be converted to // lowercase prior to their encoding in HTTP/2. 
" func http2validWireHeaderFieldName(v string) bool { if len(v) == 0 { return false } for _, r := range v { if !httplex.IsTokenRune(r) { return false } if 'A' <= r && r <= 'Z' { return false } } return true } var http2httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n) func init() { for i := 100; i <= 999; i++ { if v := StatusText(i); v != "" { http2httpCodeStringCommon[i] = strconv.Itoa(i) } } } func http2httpCodeString(code int) string { if s, ok := http2httpCodeStringCommon[code]; ok { return s } return strconv.Itoa(code) } // from pkg io type http2stringWriter interface { WriteString(s string) (n int, err error) } // A gate lets two goroutines coordinate their activities. type http2gate chan struct{} func (g http2gate) Done() { g <- struct{}{} } func (g http2gate) Wait() { <-g } // A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed). type http2closeWaiter chan struct{} // Init makes a closeWaiter usable. // It exists because so a closeWaiter value can be placed inside a // larger struct and have the Mutex and Cond's memory in the same // allocation. func (cw *http2closeWaiter) Init() { *cw = make(chan struct{}) } // Close marks the closeWaiter as closed and unblocks any waiters. func (cw http2closeWaiter) Close() { close(cw) } // Wait waits for the closeWaiter to become closed. func (cw http2closeWaiter) Wait() { <-cw } // bufferedWriter is a buffered writer that writes to w. // Its buffered writer is lazily allocated as needed, to minimize // idle memory usage with many connections. type http2bufferedWriter struct { w io.Writer // immutable bw *bufio.Writer // non-nil when data is buffered } func http2newBufferedWriter(w io.Writer) *http2bufferedWriter { return &http2bufferedWriter{w: w} } var http2bufWriterPool = sync.Pool{ New: func() interface{} { return bufio.NewWriterSize(nil, 4<<10) }, } func (w *http2bufferedWriter) Write(p []byte) (n int, err error) { if w.bw == nil { bw := http2bufWriterPool.Get().(*bufio.Writer) bw.Reset(w.w) w.bw = bw } return w.bw.Write(p) } func (w *http2bufferedWriter) Flush() error { bw := w.bw if bw == nil { return nil } err := bw.Flush() bw.Reset(nil) http2bufWriterPool.Put(bw) w.bw = nil return err } func http2mustUint31(v int32) uint32 { if v < 0 || v > 2147483647 { panic("out of range") } return uint32(v) } // bodyAllowedForStatus reports whether a given response status code // permits a body. See RFC 2616, section 4.4. func http2bodyAllowedForStatus(status int) bool { switch { case status >= 100 && status <= 199: return false case status == 204: return false case status == 304: return false } return true } type http2httpError struct { msg string timeout bool } func (e *http2httpError) Error() string { return e.msg } func (e *http2httpError) Timeout() bool { return e.timeout } func (e *http2httpError) Temporary() bool { return true } var http2errTimeout error = &http2httpError{msg: "http2: timeout awaiting response headers", timeout: true} type http2connectionStater interface { ConnectionState() tls.ConnectionState } var http2sorterPool = sync.Pool{New: func() interface{} { return new(http2sorter) }} type http2sorter struct { v []string // owned by sorter } func (s *http2sorter) Len() int { return len(s.v) } func (s *http2sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] } func (s *http2sorter) Less(i, j int) bool { return s.v[i] < s.v[j] } // Keys returns the sorted keys of h. // // The returned slice is only valid until s used again or returned to // its pool. 
func (s *http2sorter) Keys(h Header) []string { keys := s.v[:0] for k := range h { keys = append(keys, k) } s.v = keys sort.Sort(s) return keys } func (s *http2sorter) SortStrings(ss []string) { save := s.v s.v = ss sort.Sort(s) s.v = save } // validPseudoPath reports whether v is a valid :path pseudo-header // value. It must be either: // // *) a non-empty string starting with '/', but not with with "//", // *) the string '*', for OPTIONS requests. // // For now this is only used a quick check for deciding when to clean // up Opaque URLs before sending requests from the Transport. // See golang.org/issue/16847 func http2validPseudoPath(v string) bool { return (len(v) > 0 && v[0] == '/' && (len(v) == 1 || v[1] != '/')) || v == "*" } // pipe is a goroutine-safe io.Reader/io.Writer pair. It's like // io.Pipe except there are no PipeReader/PipeWriter halves, and the // underlying buffer is an interface. (io.Pipe is always unbuffered) type http2pipe struct { mu sync.Mutex c sync.Cond // c.L lazily initialized to &p.mu b http2pipeBuffer err error // read error once empty. non-nil means closed. breakErr error // immediate read error (caller doesn't see rest of b) donec chan struct{} // closed on error readFn func() // optional code to run in Read before error } type http2pipeBuffer interface { Len() int io.Writer io.Reader } func (p *http2pipe) Len() int { p.mu.Lock() defer p.mu.Unlock() return p.b.Len() } // Read waits until data is available and copies bytes // from the buffer into p. func (p *http2pipe) Read(d []byte) (n int, err error) { p.mu.Lock() defer p.mu.Unlock() if p.c.L == nil { p.c.L = &p.mu } for { if p.breakErr != nil { return 0, p.breakErr } if p.b.Len() > 0 { return p.b.Read(d) } if p.err != nil { if p.readFn != nil { p.readFn() p.readFn = nil } return 0, p.err } p.c.Wait() } } var http2errClosedPipeWrite = errors.New("write on closed buffer") // Write copies bytes from p into the buffer and wakes a reader. // It is an error to write more data than the buffer can hold. func (p *http2pipe) Write(d []byte) (n int, err error) { p.mu.Lock() defer p.mu.Unlock() if p.c.L == nil { p.c.L = &p.mu } defer p.c.Signal() if p.err != nil { return 0, http2errClosedPipeWrite } return p.b.Write(d) } // CloseWithError causes the next Read (waking up a current blocked // Read if needed) to return the provided err after all data has been // read. // // The error must be non-nil. func (p *http2pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) } // BreakWithError causes the next Read (waking up a current blocked // Read if needed) to return the provided err immediately, without // waiting for unread data. func (p *http2pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) } // closeWithErrorAndCode is like CloseWithError but also sets some code to run // in the caller's goroutine before returning the error. func (p *http2pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) } func (p *http2pipe) closeWithError(dst *error, err error, fn func()) { if err == nil { panic("err must be non-nil") } p.mu.Lock() defer p.mu.Unlock() if p.c.L == nil { p.c.L = &p.mu } defer p.c.Signal() if *dst != nil { return } p.readFn = fn *dst = err p.closeDoneLocked() } // requires p.mu be held. func (p *http2pipe) closeDoneLocked() { if p.donec == nil { return } select { case <-p.donec: default: close(p.donec) } } // Err returns the error (if any) first set by BreakWithError or CloseWithError. 
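// Illustrative sketch (not part of the original bundle): using the
// goroutine-safe pipe with a fixedBuffer backing store. After
// CloseWithError, Read drains the buffered bytes and then reports the
// stored error. http2examplePipeUsage is a hypothetical name.
func http2examplePipeUsage() {
	p := &http2pipe{b: &http2fixedBuffer{buf: make([]byte, 64)}}
	p.Write([]byte("hello"))
	p.CloseWithError(io.EOF)

	buf := make([]byte, 16)
	n, _ := p.Read(buf)   // returns the buffered "hello" first
	_, err := p.Read(buf) // subsequent reads return io.EOF
	log.Printf("read %q, then err=%v", buf[:n], err)
}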
func (p *http2pipe) Err() error { p.mu.Lock() defer p.mu.Unlock() if p.breakErr != nil { return p.breakErr } return p.err } // Done returns a channel which is closed if and when this pipe is closed // with CloseWithError. func (p *http2pipe) Done() <-chan struct{} { p.mu.Lock() defer p.mu.Unlock() if p.donec == nil { p.donec = make(chan struct{}) if p.err != nil || p.breakErr != nil { p.closeDoneLocked() } } return p.donec } const ( http2prefaceTimeout = 10 * time.Second http2firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway http2handlerChunkWriteSize = 4 << 10 http2defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? ) var ( http2errClientDisconnected = errors.New("client disconnected") http2errClosedBody = errors.New("body closed by handler") http2errHandlerComplete = errors.New("http2: request body closed due to handler exiting") http2errStreamClosed = errors.New("http2: stream closed") ) var http2responseWriterStatePool = sync.Pool{ New: func() interface{} { rws := &http2responseWriterState{} rws.bw = bufio.NewWriterSize(http2chunkWriter{rws}, http2handlerChunkWriteSize) return rws }, } // Test hooks. var ( http2testHookOnConn func() http2testHookGetServerConn func(*http2serverConn) http2testHookOnPanicMu *sync.Mutex // nil except in tests http2testHookOnPanic func(sc *http2serverConn, panicVal interface{}) (rePanic bool) ) // Server is an HTTP/2 server. type http2Server struct { // MaxHandlers limits the number of http.Handler ServeHTTP goroutines // which may run at a time over all connections. // Negative or zero no limit. // TODO: implement MaxHandlers int // MaxConcurrentStreams optionally specifies the number of // concurrent streams that each client may have open at a // time. This is unrelated to the number of http.Handler goroutines // which may be active globally, which is MaxHandlers. // If zero, MaxConcurrentStreams defaults to at least 100, per // the HTTP/2 spec's recommendations. MaxConcurrentStreams uint32 // MaxReadFrameSize optionally specifies the largest frame // this server is willing to read. A valid value is between // 16k and 16M, inclusive. If zero or otherwise invalid, a // default value is used. MaxReadFrameSize uint32 // PermitProhibitedCipherSuites, if true, permits the use of // cipher suites prohibited by the HTTP/2 spec. PermitProhibitedCipherSuites bool } func (s *http2Server) maxReadFrameSize() uint32 { if v := s.MaxReadFrameSize; v >= http2minMaxFrameSize && v <= http2maxFrameSize { return v } return http2defaultMaxReadFrameSize } func (s *http2Server) maxConcurrentStreams() uint32 { if v := s.MaxConcurrentStreams; v > 0 { return v } return http2defaultMaxStreams } // ConfigureServer adds HTTP/2 support to a net/http Server. // // The configuration conf may be nil. // // ConfigureServer must be called before s begins serving. func http2ConfigureServer(s *Server, conf *http2Server) error { if conf == nil { conf = new(http2Server) } if s.TLSConfig == nil { s.TLSConfig = new(tls.Config) } else if s.TLSConfig.CipherSuites != nil { // If they already provided a CipherSuite list, return // an error if it has a bad order or is missing // ECDHE_RSA_WITH_AES_128_GCM_SHA256. 
const requiredCipher = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 haveRequired := false sawBad := false for i, cs := range s.TLSConfig.CipherSuites { if cs == requiredCipher { haveRequired = true } if http2isBadCipher(cs) { sawBad = true } else if sawBad { return fmt.Errorf("http2: TLSConfig.CipherSuites index %d contains an HTTP/2-approved cipher suite (%#04x), but it comes after unapproved cipher suites. With this configuration, clients that don't support previous, approved cipher suites may be given an unapproved one and reject the connection.", i, cs) } } if !haveRequired { return fmt.Errorf("http2: TLSConfig.CipherSuites is missing HTTP/2-required TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256") } } s.TLSConfig.PreferServerCipherSuites = true haveNPN := false for _, p := range s.TLSConfig.NextProtos { if p == http2NextProtoTLS { haveNPN = true break } } if !haveNPN { s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, http2NextProtoTLS) } s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, "h2-14") if s.TLSNextProto == nil { s.TLSNextProto = map[string]func(*Server, *tls.Conn, Handler){} } protoHandler := func(hs *Server, c *tls.Conn, h Handler) { if http2testHookOnConn != nil { http2testHookOnConn() } conf.ServeConn(c, &http2ServeConnOpts{ Handler: h, BaseConfig: hs, }) } s.TLSNextProto[http2NextProtoTLS] = protoHandler s.TLSNextProto["h2-14"] = protoHandler return nil } // ServeConnOpts are options for the Server.ServeConn method. type http2ServeConnOpts struct { // BaseConfig optionally sets the base configuration // for values. If nil, defaults are used. BaseConfig *Server // Handler specifies which handler to use for processing // requests. If nil, BaseConfig.Handler is used. If BaseConfig // or BaseConfig.Handler is nil, http.DefaultServeMux is used. Handler Handler } func (o *http2ServeConnOpts) baseConfig() *Server { if o != nil && o.BaseConfig != nil { return o.BaseConfig } return new(Server) } func (o *http2ServeConnOpts) handler() Handler { if o != nil { if o.Handler != nil { return o.Handler } if o.BaseConfig != nil && o.BaseConfig.Handler != nil { return o.BaseConfig.Handler } } return DefaultServeMux } // ServeConn serves HTTP/2 requests on the provided connection and // blocks until the connection is no longer readable. // // ServeConn starts speaking HTTP/2 assuming that c has not had any // reads or writes. It writes its initial settings frame and expects // to be able to read the preface and settings frame from the // client. If c has a ConnectionState method like a *tls.Conn, the // ConnectionState is used to verify the TLS ciphersuite and to set // the Request.TLS field in Handlers. // // ServeConn does not support h2c by itself. Any h2c support must be // implemented in terms of providing a suitably-behaving net.Conn. // // The opts parameter is optional. If nil, default values are used. 
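// Illustrative sketch (not part of the original file): serving HTTP/2
// directly on accepted connections, the lower-level alternative to
// http2ConfigureServer. The listener is assumed to hand back *tls.Conn
// values already negotiated for the "h2" protocol; error handling is
// elided and the name exampleServeConnLoop is hypothetical.
func exampleServeConnLoop(ln net.Listener, h Handler) {
	h2srv := &http2Server{MaxConcurrentStreams: 250}
	for {
		c, err := ln.Accept()
		if err != nil {
			return
		}
		go h2srv.ServeConn(c, &http2ServeConnOpts{Handler: h})
	}
}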
func (s *http2Server) ServeConn(c net.Conn, opts *http2ServeConnOpts) { baseCtx, cancel := http2serverConnBaseContext(c, opts) defer cancel() sc := &http2serverConn{ srv: s, hs: opts.baseConfig(), conn: c, baseCtx: baseCtx, remoteAddrStr: c.RemoteAddr().String(), bw: http2newBufferedWriter(c), handler: opts.handler(), streams: make(map[uint32]*http2stream), readFrameCh: make(chan http2readFrameResult), wantWriteFrameCh: make(chan http2frameWriteMsg, 8), wroteFrameCh: make(chan http2frameWriteResult, 1), bodyReadCh: make(chan http2bodyReadMsg), doneServing: make(chan struct{}), advMaxStreams: s.maxConcurrentStreams(), writeSched: http2writeScheduler{ maxFrameSize: http2initialMaxFrameSize, }, initialWindowSize: http2initialWindowSize, headerTableSize: http2initialHeaderTableSize, serveG: http2newGoroutineLock(), pushEnabled: true, } sc.flow.add(http2initialWindowSize) sc.inflow.add(http2initialWindowSize) sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) fr := http2NewFramer(sc.bw, c) fr.ReadMetaHeaders = hpack.NewDecoder(http2initialHeaderTableSize, nil) fr.MaxHeaderListSize = sc.maxHeaderListSize() fr.SetMaxReadFrameSize(s.maxReadFrameSize()) sc.framer = fr if tc, ok := c.(http2connectionStater); ok { sc.tlsState = new(tls.ConnectionState) *sc.tlsState = tc.ConnectionState() if sc.tlsState.Version < tls.VersionTLS12 { sc.rejectConn(http2ErrCodeInadequateSecurity, "TLS version too low") return } if sc.tlsState.ServerName == "" { } if !s.PermitProhibitedCipherSuites && http2isBadCipher(sc.tlsState.CipherSuite) { sc.rejectConn(http2ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite)) return } } if hook := http2testHookGetServerConn; hook != nil { hook(sc) } sc.serve() } func (sc *http2serverConn) rejectConn(err http2ErrCode, debug string) { sc.vlogf("http2: server rejecting conn: %v, %s", err, debug) sc.framer.WriteGoAway(0, err, []byte(debug)) sc.bw.Flush() sc.conn.Close() } type http2serverConn struct { // Immutable: srv *http2Server hs *Server conn net.Conn bw *http2bufferedWriter // writing to conn handler Handler baseCtx http2contextContext framer *http2Framer doneServing chan struct{} // closed when serverConn.serve ends readFrameCh chan http2readFrameResult // written by serverConn.readFrames wantWriteFrameCh chan http2frameWriteMsg // from handlers -> serve wroteFrameCh chan http2frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes bodyReadCh chan http2bodyReadMsg // from handlers -> serve testHookCh chan func(int) // code to run on the serve loop flow http2flow // conn-wide (not stream-specific) outbound flow control inflow http2flow // conn-wide inbound flow control tlsState *tls.ConnectionState // shared by all handlers, like net/http remoteAddrStr string // Everything following is owned by the serve loop; use serveG.check(): serveG http2goroutineLock // used to verify funcs are on serve() pushEnabled bool sawFirstSettings bool // got the initial SETTINGS frame after the preface needToSendSettingsAck bool unackedSettings int // how many SETTINGS have we sent without ACKs? 
clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit) advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client curOpenStreams uint32 // client's number of open streams maxStreamID uint32 // max ever seen streams map[uint32]*http2stream initialWindowSize int32 headerTableSize uint32 peerMaxHeaderListSize uint32 // zero means unknown (default) canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case writingFrame bool // started write goroutine but haven't heard back on wroteFrameCh needsFrameFlush bool // last frame write wasn't a flush writeSched http2writeScheduler inGoAway bool // we've started to or sent GOAWAY needToSendGoAway bool // we need to schedule a GOAWAY frame write goAwayCode http2ErrCode shutdownTimerCh <-chan time.Time // nil until used shutdownTimer *time.Timer // nil until used freeRequestBodyBuf []byte // if non-nil, a free initialWindowSize buffer for getRequestBodyBuf // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer hpackEncoder *hpack.Encoder } func (sc *http2serverConn) maxHeaderListSize() uint32 { n := sc.hs.MaxHeaderBytes if n <= 0 { n = DefaultMaxHeaderBytes } // http2's count is in a slightly different unit and includes 32 bytes per pair. // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. const perFieldOverhead = 32 // per http2 spec const typicalHeaders = 10 // conservative return uint32(n + typicalHeaders*perFieldOverhead) } // stream represents a stream. This is the minimal metadata needed by // the serve goroutine. Most of the actual stream state is owned by // the http.Handler's goroutine in the responseWriter. Because the // responseWriter's responseWriterState is recycled at the end of a // handler, this struct intentionally has no pointer to the // *responseWriter{,State} itself, as the Handler ending nils out the // responseWriter's state field. type http2stream struct { // immutable: sc *http2serverConn id uint32 body *http2pipe // non-nil if expecting DATA frames cw http2closeWaiter // closed wait stream transitions to closed state ctx http2contextContext cancelCtx func() // owned by serverConn's serve loop: bodyBytes int64 // body bytes seen so far declBodyBytes int64 // or -1 if undeclared flow http2flow // limits writing from Handler to client inflow http2flow // what the client is allowed to POST/etc to us parent *http2stream // or nil numTrailerValues int64 weight uint8 state http2streamState sentReset bool // only true once detached from streams map gotReset bool // only true once detacted from streams map gotTrailerHeader bool // HEADER frame for trailers was seen wroteHeaders bool // whether we wrote headers (not status 100) reqBuf []byte trailer Header // accumulated trailers reqTrailer Header // handler's Request.Trailer } func (sc *http2serverConn) Framer() *http2Framer { return sc.framer } func (sc *http2serverConn) CloseConn() error { return sc.conn.Close() } func (sc *http2serverConn) Flush() error { return sc.bw.Flush() } func (sc *http2serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) { return sc.hpackEncoder, &sc.headerWriteBuf } func (sc *http2serverConn) state(streamID uint32) (http2streamState, *http2stream) { sc.serveG.check() if st, ok := sc.streams[streamID]; ok { return st.state, st } if streamID <= sc.maxStreamID { return http2stateClosed, nil } return http2stateIdle, nil } // setConnState calls the net/http ConnState hook for this connection, if configured. 
// Note that the net/http package does StateNew and StateClosed for us. // There is currently no plan for StateHijacked or hijacking HTTP/2 connections. func (sc *http2serverConn) setConnState(state ConnState) { if sc.hs.ConnState != nil { sc.hs.ConnState(sc.conn, state) } } func (sc *http2serverConn) vlogf(format string, args ...interface{}) { if http2VerboseLogs { sc.logf(format, args...) } } func (sc *http2serverConn) logf(format string, args ...interface{}) { if lg := sc.hs.ErrorLog; lg != nil { lg.Printf(format, args...) } else { log.Printf(format, args...) } } // errno returns v's underlying uintptr, else 0. // // TODO: remove this helper function once http2 can use build // tags. See comment in isClosedConnError. func http2errno(v error) uintptr { if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr { return uintptr(rv.Uint()) } return 0 } // isClosedConnError reports whether err is an error from use of a closed // network connection. func http2isClosedConnError(err error) bool { if err == nil { return false } str := err.Error() if strings.Contains(str, "use of closed network connection") { return true } if runtime.GOOS == "windows" { if oe, ok := err.(*net.OpError); ok && oe.Op == "read" { if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" { const WSAECONNABORTED = 10053 const WSAECONNRESET = 10054 if n := http2errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED { return true } } } } return false } func (sc *http2serverConn) condlogf(err error, format string, args ...interface{}) { if err == nil { return } if err == io.EOF || err == io.ErrUnexpectedEOF || http2isClosedConnError(err) { sc.vlogf(format, args...) } else { sc.logf(format, args...) } } func (sc *http2serverConn) canonicalHeader(v string) string { sc.serveG.check() cv, ok := http2commonCanonHeader[v] if ok { return cv } cv, ok = sc.canonHeader[v] if ok { return cv } if sc.canonHeader == nil { sc.canonHeader = make(map[string]string) } cv = CanonicalHeaderKey(v) sc.canonHeader[v] = cv return cv } type http2readFrameResult struct { f http2Frame // valid until readMore is called err error // readMore should be called once the consumer no longer needs or // retains f. After readMore, f is invalid and more frames can be // read. readMore func() } // readFrames is the loop that reads incoming frames. // It takes care to only read one frame at a time, blocking until the // consumer is done with the frame. // It's run on its own goroutine. func (sc *http2serverConn) readFrames() { gate := make(http2gate) gateDone := gate.Done for { f, err := sc.framer.ReadFrame() select { case sc.readFrameCh <- http2readFrameResult{f, err, gateDone}: case <-sc.doneServing: return } select { case <-gate: case <-sc.doneServing: return } if http2terminalReadFrameError(err) { return } } } // frameWriteResult is the message passed from writeFrameAsync to the serve goroutine. type http2frameWriteResult struct { wm http2frameWriteMsg // what was written (or attempted) err error // result of the writeFrame call } // writeFrameAsync runs in its own goroutine and writes a single frame // and then reports when it's done. // At most one goroutine can be running writeFrameAsync at a time per // serverConn. 
func (sc *http2serverConn) writeFrameAsync(wm http2frameWriteMsg) { err := wm.write.writeFrame(sc) sc.wroteFrameCh <- http2frameWriteResult{wm, err} } func (sc *http2serverConn) closeAllStreamsOnConnClose() { sc.serveG.check() for _, st := range sc.streams { sc.closeStream(st, http2errClientDisconnected) } } func (sc *http2serverConn) stopShutdownTimer() { sc.serveG.check() if t := sc.shutdownTimer; t != nil { t.Stop() } } func (sc *http2serverConn) notePanic() { if http2testHookOnPanicMu != nil { http2testHookOnPanicMu.Lock() defer http2testHookOnPanicMu.Unlock() } if http2testHookOnPanic != nil { if e := recover(); e != nil { if http2testHookOnPanic(sc, e) { panic(e) } } } } func (sc *http2serverConn) serve() { sc.serveG.check() defer sc.notePanic() defer sc.conn.Close() defer sc.closeAllStreamsOnConnClose() defer sc.stopShutdownTimer() defer close(sc.doneServing) if http2VerboseLogs { sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) } sc.writeFrame(http2frameWriteMsg{ write: http2writeSettings{ {http2SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, {http2SettingMaxConcurrentStreams, sc.advMaxStreams}, {http2SettingMaxHeaderListSize, sc.maxHeaderListSize()}, }, }) sc.unackedSettings++ if err := sc.readPreface(); err != nil { sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) return } sc.setConnState(StateActive) sc.setConnState(StateIdle) go sc.readFrames() settingsTimer := time.NewTimer(http2firstSettingsTimeout) loopNum := 0 for { loopNum++ select { case wm := <-sc.wantWriteFrameCh: sc.writeFrame(wm) case res := <-sc.wroteFrameCh: sc.wroteFrame(res) case res := <-sc.readFrameCh: if !sc.processFrameFromReader(res) { return } res.readMore() if settingsTimer.C != nil { settingsTimer.Stop() settingsTimer.C = nil } case m := <-sc.bodyReadCh: sc.noteBodyRead(m.st, m.n) case <-settingsTimer.C: sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr()) return case <-sc.shutdownTimerCh: sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) return case fn := <-sc.testHookCh: fn(loopNum) } } } // readPreface reads the ClientPreface greeting from the peer // or returns an error on timeout or an invalid greeting. func (sc *http2serverConn) readPreface() error { errc := make(chan error, 1) go func() { buf := make([]byte, len(http2ClientPreface)) if _, err := io.ReadFull(sc.conn, buf); err != nil { errc <- err } else if !bytes.Equal(buf, http2clientPreface) { errc <- fmt.Errorf("bogus greeting %q", buf) } else { errc <- nil } }() timer := time.NewTimer(http2prefaceTimeout) defer timer.Stop() select { case <-timer.C: return errors.New("timeout waiting for client preface") case err := <-errc: if err == nil { if http2VerboseLogs { sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr()) } } return err } } var http2errChanPool = sync.Pool{ New: func() interface{} { return make(chan error, 1) }, } var http2writeDataPool = sync.Pool{ New: func() interface{} { return new(http2writeData) }, } // writeDataFromHandler writes DATA response frames from a handler on // the given stream. 
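// Illustrative sketch (not part of the original file): the ownership rule
// behind http2errChanPool, as applied by writeDataFromHandler below. A
// pooled channel may be returned to the pool only after its single result
// has been received; if the wait is abandoned, the channel must be dropped,
// or a late send could reach the next borrower. The names
// examplePooledErrChan and start are hypothetical.
func examplePooledErrChan(start func(chan error), cancel <-chan struct{}) error {
	ch := http2errChanPool.Get().(chan error)
	start(ch) // will eventually send exactly one error on ch
	select {
	case err := <-ch:
		http2errChanPool.Put(ch) // safe: the one pending send was consumed
		return err
	case <-cancel:
		// Intentionally not returned to the pool: a send may still be in flight.
		return errors.New("canceled")
	}
}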
func (sc *http2serverConn) writeDataFromHandler(stream *http2stream, data []byte, endStream bool) error { ch := http2errChanPool.Get().(chan error) writeArg := http2writeDataPool.Get().(*http2writeData) *writeArg = http2writeData{stream.id, data, endStream} err := sc.writeFrameFromHandler(http2frameWriteMsg{ write: writeArg, stream: stream, done: ch, }) if err != nil { return err } var frameWriteDone bool // the frame write is done (successfully or not) select { case err = <-ch: frameWriteDone = true case <-sc.doneServing: return http2errClientDisconnected case <-stream.cw: select { case err = <-ch: frameWriteDone = true default: return http2errStreamClosed } } http2errChanPool.Put(ch) if frameWriteDone { http2writeDataPool.Put(writeArg) } return err } // writeFrameFromHandler sends wm to sc.wantWriteFrameCh, but aborts // if the connection has gone away. // // This must not be run from the serve goroutine itself, else it might // deadlock writing to sc.wantWriteFrameCh (which is only mildly // buffered and is read by serve itself). If you're on the serve // goroutine, call writeFrame instead. func (sc *http2serverConn) writeFrameFromHandler(wm http2frameWriteMsg) error { sc.serveG.checkNotOn() select { case sc.wantWriteFrameCh <- wm: return nil case <-sc.doneServing: return http2errClientDisconnected } } // writeFrame schedules a frame to write and sends it if there's nothing // already being written. // // There is no pushback here (the serve goroutine never blocks). It's // the http.Handlers that block, waiting for their previous frames to // make it onto the wire // // If you're not on the serve goroutine, use writeFrameFromHandler instead. func (sc *http2serverConn) writeFrame(wm http2frameWriteMsg) { sc.serveG.check() var ignoreWrite bool switch wm.write.(type) { case *http2writeResHeaders: wm.stream.wroteHeaders = true case http2write100ContinueHeadersFrame: if wm.stream.wroteHeaders { ignoreWrite = true } } if !ignoreWrite { sc.writeSched.add(wm) } sc.scheduleFrameWrite() } // startFrameWrite starts a goroutine to write wm (in a separate // goroutine since that might block on the network), and updates the // serve goroutine's state about the world, updated from info in wm. func (sc *http2serverConn) startFrameWrite(wm http2frameWriteMsg) { sc.serveG.check() if sc.writingFrame { panic("internal error: can only be writing one frame at a time") } st := wm.stream if st != nil { switch st.state { case http2stateHalfClosedLocal: panic("internal error: attempt to send frame on half-closed-local stream") case http2stateClosed: if st.sentReset || st.gotReset { sc.scheduleFrameWrite() return } panic(fmt.Sprintf("internal error: attempt to send a write %v on a closed stream", wm)) } } sc.writingFrame = true sc.needsFrameFlush = true go sc.writeFrameAsync(wm) } // errHandlerPanicked is the error given to any callers blocked in a read from // Request.Body when the main goroutine panics. Since most handlers read in the // the main ServeHTTP goroutine, this will show up rarely. var http2errHandlerPanicked = errors.New("http2: handler panicked") // wroteFrame is called on the serve goroutine with the result of // whatever happened on writeFrameAsync. 
func (sc *http2serverConn) wroteFrame(res http2frameWriteResult) { sc.serveG.check() if !sc.writingFrame { panic("internal error: expected to be already writing a frame") } sc.writingFrame = false wm := res.wm st := wm.stream closeStream := http2endsStream(wm.write) if _, ok := wm.write.(http2handlerPanicRST); ok { sc.closeStream(st, http2errHandlerPanicked) } if ch := wm.done; ch != nil { select { case ch <- res.err: default: panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wm.write)) } } wm.write = nil if closeStream { if st == nil { panic("internal error: expecting non-nil stream") } switch st.state { case http2stateOpen: st.state = http2stateHalfClosedLocal errCancel := http2streamError(st.id, http2ErrCodeCancel) sc.resetStream(errCancel) case http2stateHalfClosedRemote: sc.closeStream(st, http2errHandlerComplete) } } sc.scheduleFrameWrite() } // scheduleFrameWrite tickles the frame writing scheduler. // // If a frame is already being written, nothing happens. This will be called again // when the frame is done being written. // // If a frame isn't being written we need to send one, the best frame // to send is selected, preferring first things that aren't // stream-specific (e.g. ACKing settings), and then finding the // highest priority stream. // // If a frame isn't being written and there's nothing else to send, we // flush the write buffer. func (sc *http2serverConn) scheduleFrameWrite() { sc.serveG.check() if sc.writingFrame { return } if sc.needToSendGoAway { sc.needToSendGoAway = false sc.startFrameWrite(http2frameWriteMsg{ write: &http2writeGoAway{ maxStreamID: sc.maxStreamID, code: sc.goAwayCode, }, }) return } if sc.needToSendSettingsAck { sc.needToSendSettingsAck = false sc.startFrameWrite(http2frameWriteMsg{write: http2writeSettingsAck{}}) return } if !sc.inGoAway { if wm, ok := sc.writeSched.take(); ok { sc.startFrameWrite(wm) return } } if sc.needsFrameFlush { sc.startFrameWrite(http2frameWriteMsg{write: http2flushFrameWriter{}}) sc.needsFrameFlush = false return } } func (sc *http2serverConn) goAway(code http2ErrCode) { sc.serveG.check() if sc.inGoAway { return } if code != http2ErrCodeNo { sc.shutDownIn(250 * time.Millisecond) } else { sc.shutDownIn(1 * time.Second) } sc.inGoAway = true sc.needToSendGoAway = true sc.goAwayCode = code sc.scheduleFrameWrite() } func (sc *http2serverConn) shutDownIn(d time.Duration) { sc.serveG.check() sc.shutdownTimer = time.NewTimer(d) sc.shutdownTimerCh = sc.shutdownTimer.C } func (sc *http2serverConn) resetStream(se http2StreamError) { sc.serveG.check() sc.writeFrame(http2frameWriteMsg{write: se}) if st, ok := sc.streams[se.StreamID]; ok { st.sentReset = true sc.closeStream(st, se) } } // processFrameFromReader processes the serve loop's read from readFrameCh from the // frame-reading goroutine. // processFrameFromReader returns whether the connection should be kept open. 
func (sc *http2serverConn) processFrameFromReader(res http2readFrameResult) bool { sc.serveG.check() err := res.err if err != nil { if err == http2ErrFrameTooLarge { sc.goAway(http2ErrCodeFrameSize) return true } clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || http2isClosedConnError(err) if clientGone { return false } } else { f := res.f if http2VerboseLogs { sc.vlogf("http2: server read frame %v", http2summarizeFrame(f)) } err = sc.processFrame(f) if err == nil { return true } } switch ev := err.(type) { case http2StreamError: sc.resetStream(ev) return true case http2goAwayFlowError: sc.goAway(http2ErrCodeFlowControl) return true case http2ConnectionError: sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev) sc.goAway(http2ErrCode(ev)) return true default: if res.err != nil { sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err) } else { sc.logf("http2: server closing client connection: %v", err) } return false } } func (sc *http2serverConn) processFrame(f http2Frame) error { sc.serveG.check() if !sc.sawFirstSettings { if _, ok := f.(*http2SettingsFrame); !ok { return http2ConnectionError(http2ErrCodeProtocol) } sc.sawFirstSettings = true } switch f := f.(type) { case *http2SettingsFrame: return sc.processSettings(f) case *http2MetaHeadersFrame: return sc.processHeaders(f) case *http2WindowUpdateFrame: return sc.processWindowUpdate(f) case *http2PingFrame: return sc.processPing(f) case *http2DataFrame: return sc.processData(f) case *http2RSTStreamFrame: return sc.processResetStream(f) case *http2PriorityFrame: return sc.processPriority(f) case *http2PushPromiseFrame: return http2ConnectionError(http2ErrCodeProtocol) default: sc.vlogf("http2: server ignoring frame: %v", f.Header()) return nil } } func (sc *http2serverConn) processPing(f *http2PingFrame) error { sc.serveG.check() if f.IsAck() { return nil } if f.StreamID != 0 { return http2ConnectionError(http2ErrCodeProtocol) } sc.writeFrame(http2frameWriteMsg{write: http2writePingAck{f}}) return nil } func (sc *http2serverConn) processWindowUpdate(f *http2WindowUpdateFrame) error { sc.serveG.check() switch { case f.StreamID != 0: st := sc.streams[f.StreamID] if st == nil { return nil } if !st.flow.add(int32(f.Increment)) { return http2streamError(f.StreamID, http2ErrCodeFlowControl) } default: if !sc.flow.add(int32(f.Increment)) { return http2goAwayFlowError{} } } sc.scheduleFrameWrite() return nil } func (sc *http2serverConn) processResetStream(f *http2RSTStreamFrame) error { sc.serveG.check() state, st := sc.state(f.StreamID) if state == http2stateIdle { return http2ConnectionError(http2ErrCodeProtocol) } if st != nil { st.gotReset = true st.cancelCtx() sc.closeStream(st, http2streamError(f.StreamID, f.ErrCode)) } return nil } func (sc *http2serverConn) closeStream(st *http2stream, err error) { sc.serveG.check() if st.state == http2stateIdle || st.state == http2stateClosed { panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state)) } st.state = http2stateClosed sc.curOpenStreams-- if sc.curOpenStreams == 0 { sc.setConnState(StateIdle) } delete(sc.streams, st.id) if p := st.body; p != nil { sc.sendWindowUpdate(nil, p.Len()) p.CloseWithError(err) } st.cw.Close() sc.writeSched.forgetStream(st.id) if st.reqBuf != nil { sc.freeRequestBodyBuf = st.reqBuf } } func (sc *http2serverConn) processSettings(f *http2SettingsFrame) error { sc.serveG.check() if f.IsAck() { sc.unackedSettings-- if sc.unackedSettings < 0 { return 
http2ConnectionError(http2ErrCodeProtocol) } return nil } if err := f.ForeachSetting(sc.processSetting); err != nil { return err } sc.needToSendSettingsAck = true sc.scheduleFrameWrite() return nil } func (sc *http2serverConn) processSetting(s http2Setting) error { sc.serveG.check() if err := s.Valid(); err != nil { return err } if http2VerboseLogs { sc.vlogf("http2: server processing setting %v", s) } switch s.ID { case http2SettingHeaderTableSize: sc.headerTableSize = s.Val sc.hpackEncoder.SetMaxDynamicTableSize(s.Val) case http2SettingEnablePush: sc.pushEnabled = s.Val != 0 case http2SettingMaxConcurrentStreams: sc.clientMaxStreams = s.Val case http2SettingInitialWindowSize: return sc.processSettingInitialWindowSize(s.Val) case http2SettingMaxFrameSize: sc.writeSched.maxFrameSize = s.Val case http2SettingMaxHeaderListSize: sc.peerMaxHeaderListSize = s.Val default: if http2VerboseLogs { sc.vlogf("http2: server ignoring unknown setting %v", s) } } return nil } func (sc *http2serverConn) processSettingInitialWindowSize(val uint32) error { sc.serveG.check() old := sc.initialWindowSize sc.initialWindowSize = int32(val) growth := sc.initialWindowSize - old for _, st := range sc.streams { if !st.flow.add(growth) { return http2ConnectionError(http2ErrCodeFlowControl) } } return nil } func (sc *http2serverConn) processData(f *http2DataFrame) error { sc.serveG.check() data := f.Data() id := f.Header().StreamID st, ok := sc.streams[id] if !ok || st.state != http2stateOpen || st.gotTrailerHeader { if sc.inflow.available() < int32(f.Length) { return http2streamError(id, http2ErrCodeFlowControl) } sc.inflow.take(int32(f.Length)) sc.sendWindowUpdate(nil, int(f.Length)) return http2streamError(id, http2ErrCodeStreamClosed) } if st.body == nil { panic("internal error: should have a body in this state") } if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) return http2streamError(id, http2ErrCodeStreamClosed) } if f.Length > 0 { if st.inflow.available() < int32(f.Length) { return http2streamError(id, http2ErrCodeFlowControl) } st.inflow.take(int32(f.Length)) if len(data) > 0 { wrote, err := st.body.Write(data) if err != nil { return http2streamError(id, http2ErrCodeStreamClosed) } if wrote != len(data) { panic("internal error: bad Writer") } st.bodyBytes += int64(len(data)) } if pad := int32(f.Length) - int32(len(data)); pad > 0 { sc.sendWindowUpdate32(nil, pad) sc.sendWindowUpdate32(st, pad) } } if f.StreamEnded() { st.endStream() } return nil } // endStream closes a Request.Body's pipe. It is called when a DATA // frame says a request body is over (or after trailers). func (st *http2stream) endStream() { sc := st.sc sc.serveG.check() if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes { st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes", st.declBodyBytes, st.bodyBytes)) } else { st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest) st.body.CloseWithError(io.EOF) } st.state = http2stateHalfClosedRemote } // copyTrailersToHandlerRequest is run in the Handler's goroutine in // its Request.Body.Read just before it gets io.EOF. 
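// Illustrative sketch (not part of the original file): the arithmetic used
// by processSettingInitialWindowSize above. When the peer changes
// SETTINGS_INITIAL_WINDOW_SIZE, only the delta is applied to each open
// stream's send window, so a window may temporarily go negative and
// recover later. The name exampleWindowDelta is hypothetical.
func exampleWindowDelta(oldInitial, newInitial, available int32) int32 {
	growth := newInitial - oldInitial // may be negative
	return available + growth
}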
func (st *http2stream) copyTrailersToHandlerRequest() { for k, vv := range st.trailer { if _, ok := st.reqTrailer[k]; ok { st.reqTrailer[k] = vv } } } func (sc *http2serverConn) processHeaders(f *http2MetaHeadersFrame) error { sc.serveG.check() id := f.Header().StreamID if sc.inGoAway { return nil } if id%2 != 1 { return http2ConnectionError(http2ErrCodeProtocol) } st := sc.streams[f.Header().StreamID] if st != nil { return st.processTrailerHeaders(f) } if id <= sc.maxStreamID { return http2ConnectionError(http2ErrCodeProtocol) } sc.maxStreamID = id ctx, cancelCtx := http2contextWithCancel(sc.baseCtx) st = &http2stream{ sc: sc, id: id, state: http2stateOpen, ctx: ctx, cancelCtx: cancelCtx, } if f.StreamEnded() { st.state = http2stateHalfClosedRemote } st.cw.Init() st.flow.conn = &sc.flow st.flow.add(sc.initialWindowSize) st.inflow.conn = &sc.inflow st.inflow.add(http2initialWindowSize) sc.streams[id] = st if f.HasPriority() { http2adjustStreamPriority(sc.streams, st.id, f.Priority) } sc.curOpenStreams++ if sc.curOpenStreams == 1 { sc.setConnState(StateActive) } if sc.curOpenStreams > sc.advMaxStreams { if sc.unackedSettings == 0 { return http2streamError(st.id, http2ErrCodeProtocol) } return http2streamError(st.id, http2ErrCodeRefusedStream) } rw, req, err := sc.newWriterAndRequest(st, f) if err != nil { return err } st.reqTrailer = req.Trailer if st.reqTrailer != nil { st.trailer = make(Header) } st.body = req.Body.(*http2requestBody).pipe st.declBodyBytes = req.ContentLength handler := sc.handler.ServeHTTP if f.Truncated { handler = http2handleHeaderListTooLong } else if err := http2checkValidHTTP2Request(req); err != nil { handler = http2new400Handler(err) } go sc.runHandler(rw, req, handler) return nil } func (st *http2stream) processTrailerHeaders(f *http2MetaHeadersFrame) error { sc := st.sc sc.serveG.check() if st.gotTrailerHeader { return http2ConnectionError(http2ErrCodeProtocol) } st.gotTrailerHeader = true if !f.StreamEnded() { return http2streamError(st.id, http2ErrCodeProtocol) } if len(f.PseudoFields()) > 0 { return http2streamError(st.id, http2ErrCodeProtocol) } if st.trailer != nil { for _, hf := range f.RegularFields() { key := sc.canonicalHeader(hf.Name) if !http2ValidTrailerHeader(key) { return http2streamError(st.id, http2ErrCodeProtocol) } st.trailer[key] = append(st.trailer[key], hf.Value) } } st.endStream() return nil } func (sc *http2serverConn) processPriority(f *http2PriorityFrame) error { http2adjustStreamPriority(sc.streams, f.StreamID, f.http2PriorityParam) return nil } func http2adjustStreamPriority(streams map[uint32]*http2stream, streamID uint32, priority http2PriorityParam) { st, ok := streams[streamID] if !ok { return } st.weight = priority.Weight parent := streams[priority.StreamDep] if parent == st { return } for piter := parent; piter != nil; piter = piter.parent { if piter == st { parent.parent = st.parent break } } st.parent = parent if priority.Exclusive && (st.parent != nil || priority.StreamDep == 0) { for _, openStream := range streams { if openStream != st && openStream.parent == st.parent { openStream.parent = st } } } } func (sc *http2serverConn) newWriterAndRequest(st *http2stream, f *http2MetaHeadersFrame) (*http2responseWriter, *Request, error) { sc.serveG.check() method := f.PseudoValue("method") path := f.PseudoValue("path") scheme := f.PseudoValue("scheme") authority := f.PseudoValue("authority") isConnect := method == "CONNECT" if isConnect { if path != "" || scheme != "" || authority == "" { return nil, nil, http2streamError(f.StreamID, 
http2ErrCodeProtocol) } } else if method == "" || path == "" || (scheme != "https" && scheme != "http") { return nil, nil, http2streamError(f.StreamID, http2ErrCodeProtocol) } bodyOpen := !f.StreamEnded() if method == "HEAD" && bodyOpen { return nil, nil, http2streamError(f.StreamID, http2ErrCodeProtocol) } var tlsState *tls.ConnectionState // nil if not scheme https if scheme == "https" { tlsState = sc.tlsState } header := make(Header) for _, hf := range f.RegularFields() { header.Add(sc.canonicalHeader(hf.Name), hf.Value) } if authority == "" { authority = header.Get("Host") } needsContinue := header.Get("Expect") == "100-continue" if needsContinue { header.Del("Expect") } if cookies := header["Cookie"]; len(cookies) > 1 { header.Set("Cookie", strings.Join(cookies, "; ")) } // Setup Trailers var trailer Header for _, v := range header["Trailer"] { for _, key := range strings.Split(v, ",") { key = CanonicalHeaderKey(strings.TrimSpace(key)) switch key { case "Transfer-Encoding", "Trailer", "Content-Length": default: if trailer == nil { trailer = make(Header) } trailer[key] = nil } } } delete(header, "Trailer") body := &http2requestBody{ conn: sc, stream: st, needsContinue: needsContinue, } var url_ *url.URL var requestURI string if isConnect { url_ = &url.URL{Host: authority} requestURI = authority } else { var err error url_, err = url.ParseRequestURI(path) if err != nil { return nil, nil, http2streamError(f.StreamID, http2ErrCodeProtocol) } requestURI = path } req := &Request{ Method: method, URL: url_, RemoteAddr: sc.remoteAddrStr, Header: header, RequestURI: requestURI, Proto: "HTTP/2.0", ProtoMajor: 2, ProtoMinor: 0, TLS: tlsState, Host: authority, Body: body, Trailer: trailer, } req = http2requestWithContext(req, st.ctx) if bodyOpen { buf := make([]byte, http2initialWindowSize) body.pipe = &http2pipe{ b: &http2fixedBuffer{buf: buf}, } if vv, ok := header["Content-Length"]; ok { req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64) } else { req.ContentLength = -1 } } rws := http2responseWriterStatePool.Get().(*http2responseWriterState) bwSave := rws.bw *rws = http2responseWriterState{} rws.conn = sc rws.bw = bwSave rws.bw.Reset(http2chunkWriter{rws}) rws.stream = st rws.req = req rws.body = body rw := &http2responseWriter{rws: rws} return rw, req, nil } func (sc *http2serverConn) getRequestBodyBuf() []byte { sc.serveG.check() if buf := sc.freeRequestBodyBuf; buf != nil { sc.freeRequestBodyBuf = nil return buf } return make([]byte, http2initialWindowSize) } // Run on its own goroutine. func (sc *http2serverConn) runHandler(rw *http2responseWriter, req *Request, handler func(ResponseWriter, *Request)) { didPanic := true defer func() { rw.rws.stream.cancelCtx() if didPanic { e := recover() // Same as net/http: const size = 64 << 10 buf := make([]byte, size) buf = buf[:runtime.Stack(buf, false)] sc.writeFrameFromHandler(http2frameWriteMsg{ write: http2handlerPanicRST{rw.rws.stream.id}, stream: rw.rws.stream, }) sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf) return } rw.handlerDone() }() handler(rw, req) didPanic = false } func http2handleHeaderListTooLong(w ResponseWriter, r *Request) { // 10.5.1 Limits on Header Block Size: // .. 
"A server that receives a larger header block than it is // willing to handle can send an HTTP 431 (Request Header Fields Too // Large) status code" const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+ w.WriteHeader(statusRequestHeaderFieldsTooLarge) io.WriteString(w, "<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>") } // called from handler goroutines. // h may be nil. func (sc *http2serverConn) writeHeaders(st *http2stream, headerData *http2writeResHeaders) error { sc.serveG.checkNotOn() var errc chan error if headerData.h != nil { errc = http2errChanPool.Get().(chan error) } if err := sc.writeFrameFromHandler(http2frameWriteMsg{ write: headerData, stream: st, done: errc, }); err != nil { return err } if errc != nil { select { case err := <-errc: http2errChanPool.Put(errc) return err case <-sc.doneServing: return http2errClientDisconnected case <-st.cw: return http2errStreamClosed } } return nil } // called from handler goroutines. func (sc *http2serverConn) write100ContinueHeaders(st *http2stream) { sc.writeFrameFromHandler(http2frameWriteMsg{ write: http2write100ContinueHeadersFrame{st.id}, stream: st, }) } // A bodyReadMsg tells the server loop that the http.Handler read n // bytes of the DATA from the client on the given stream. type http2bodyReadMsg struct { st *http2stream n int } // called from handler goroutines. // Notes that the handler for the given stream ID read n bytes of its body // and schedules flow control tokens to be sent. func (sc *http2serverConn) noteBodyReadFromHandler(st *http2stream, n int) { sc.serveG.checkNotOn() select { case sc.bodyReadCh <- http2bodyReadMsg{st, n}: case <-sc.doneServing: } } func (sc *http2serverConn) noteBodyRead(st *http2stream, n int) { sc.serveG.check() sc.sendWindowUpdate(nil, n) if st.state != http2stateHalfClosedRemote && st.state != http2stateClosed { sc.sendWindowUpdate(st, n) } } // st may be nil for conn-level func (sc *http2serverConn) sendWindowUpdate(st *http2stream, n int) { sc.serveG.check() // "The legal range for the increment to the flow control // window is 1 to 2^31-1 (2,147,483,647) octets." // A Go Read call on 64-bit machines could in theory read // a larger Read than this. Very unlikely, but we handle it here // rather than elsewhere for now. 
const maxUint31 = 1<<31 - 1 for n >= maxUint31 { sc.sendWindowUpdate32(st, maxUint31) n -= maxUint31 } sc.sendWindowUpdate32(st, int32(n)) } // st may be nil for conn-level func (sc *http2serverConn) sendWindowUpdate32(st *http2stream, n int32) { sc.serveG.check() if n == 0 { return } if n < 0 { panic("negative update") } var streamID uint32 if st != nil { streamID = st.id } sc.writeFrame(http2frameWriteMsg{ write: http2writeWindowUpdate{streamID: streamID, n: uint32(n)}, stream: st, }) var ok bool if st == nil { ok = sc.inflow.add(n) } else { ok = st.inflow.add(n) } if !ok { panic("internal error; sent too many window updates without decrements?") } } type http2requestBody struct { stream *http2stream conn *http2serverConn closed bool pipe *http2pipe // non-nil if we have a HTTP entity message body needsContinue bool // need to send a 100-continue } func (b *http2requestBody) Close() error { if b.pipe != nil { b.pipe.BreakWithError(http2errClosedBody) } b.closed = true return nil } func (b *http2requestBody) Read(p []byte) (n int, err error) { if b.needsContinue { b.needsContinue = false b.conn.write100ContinueHeaders(b.stream) } if b.pipe == nil { return 0, io.EOF } n, err = b.pipe.Read(p) if n > 0 { b.conn.noteBodyReadFromHandler(b.stream, n) } return } // responseWriter is the http.ResponseWriter implementation. It's // intentionally small (1 pointer wide) to minimize garbage. The // responseWriterState pointer inside is zeroed at the end of a // request (in handlerDone) and calls on the responseWriter thereafter // simply crash (caller's mistake), but the much larger responseWriterState // and buffers are reused between multiple requests. type http2responseWriter struct { rws *http2responseWriterState } // Optional http.ResponseWriter interfaces implemented. var ( _ CloseNotifier = (*http2responseWriter)(nil) _ Flusher = (*http2responseWriter)(nil) _ http2stringWriter = (*http2responseWriter)(nil) ) type http2responseWriterState struct { // immutable within a request: stream *http2stream req *Request body *http2requestBody // to close at end of request, if DATA frames didn't conn *http2serverConn // TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState} // mutated by http.Handler goroutine: handlerHeader Header // nil until called snapHeader Header // snapshot of handlerHeader at WriteHeader time trailers []string // set in writeChunk status int // status code passed to WriteHeader wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. sentHeader bool // have we sent the header frame? handlerDone bool // handler has finished sentContentLen int64 // non-zero if handler set a Content-Length header wroteBytes int64 closeNotifierMu sync.Mutex // guards closeNotifierCh closeNotifierCh chan bool // nil until first used } type http2chunkWriter struct{ rws *http2responseWriterState } func (cw http2chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) } func (rws *http2responseWriterState) hasTrailers() bool { return len(rws.trailers) != 0 } // declareTrailer is called for each Trailer header when the // response header is written. It notes that a header will need to be // written in the trailers at the end of the response. 
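// Illustrative sketch (not part of the original file): splitting a large
// flow-control refund into legal WINDOW_UPDATE increments, mirroring
// sendWindowUpdate above; RFC 7540 caps a single increment at 2^31-1.
// The name exampleSplitWindowUpdate is hypothetical.
func exampleSplitWindowUpdate(n int64, emit func(int32)) {
	const maxIncrement = 1<<31 - 1
	for n >= maxIncrement {
		emit(maxIncrement)
		n -= maxIncrement
	}
	if n > 0 {
		emit(int32(n))
	}
}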
func (rws *http2responseWriterState) declareTrailer(k string) { k = CanonicalHeaderKey(k) if !http2ValidTrailerHeader(k) { rws.conn.logf("ignoring invalid trailer %q", k) return } if !http2strSliceContains(rws.trailers, k) { rws.trailers = append(rws.trailers, k) } } // writeChunk writes chunks from the bufio.Writer. But because // bufio.Writer may bypass its chunking, sometimes p may be // arbitrarily large. // // writeChunk is also responsible (on the first chunk) for sending the // HEADER response. func (rws *http2responseWriterState) writeChunk(p []byte) (n int, err error) { if !rws.wroteHeader { rws.writeHeader(200) } isHeadResp := rws.req.Method == "HEAD" if !rws.sentHeader { rws.sentHeader = true var ctype, clen string if clen = rws.snapHeader.Get("Content-Length"); clen != "" { rws.snapHeader.Del("Content-Length") clen64, err := strconv.ParseInt(clen, 10, 64) if err == nil && clen64 >= 0 { rws.sentContentLen = clen64 } else { clen = "" } } if clen == "" && rws.handlerDone && http2bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) { clen = strconv.Itoa(len(p)) } _, hasContentType := rws.snapHeader["Content-Type"] if !hasContentType && http2bodyAllowedForStatus(rws.status) { ctype = DetectContentType(p) } var date string if _, ok := rws.snapHeader["Date"]; !ok { date = time.Now().UTC().Format(TimeFormat) } for _, v := range rws.snapHeader["Trailer"] { http2foreachHeaderElement(v, rws.declareTrailer) } endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp err = rws.conn.writeHeaders(rws.stream, &http2writeResHeaders{ streamID: rws.stream.id, httpResCode: rws.status, h: rws.snapHeader, endStream: endStream, contentType: ctype, contentLength: clen, date: date, }) if err != nil { return 0, err } if endStream { return 0, nil } } if isHeadResp { return len(p), nil } if len(p) == 0 && !rws.handlerDone { return 0, nil } if rws.handlerDone { rws.promoteUndeclaredTrailers() } endStream := rws.handlerDone && !rws.hasTrailers() if len(p) > 0 || endStream { if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { return 0, err } } if rws.handlerDone && rws.hasTrailers() { err = rws.conn.writeHeaders(rws.stream, &http2writeResHeaders{ streamID: rws.stream.id, h: rws.handlerHeader, trailers: rws.trailers, endStream: true, }) return len(p), err } return len(p), nil } // TrailerPrefix is a magic prefix for ResponseWriter.Header map keys // that, if present, signals that the map entry is actually for // the response trailers, and not the response headers. The prefix // is stripped after the ServeHTTP call finishes and the values are // sent in the trailers. // // This mechanism is intended only for trailers that are not known // prior to the headers being written. If the set of trailers is fixed // or known before the header is written, the normal Go trailers mechanism // is preferred: // https://golang.org/pkg/net/http/#ResponseWriter // https://golang.org/pkg/net/http/#example_ResponseWriter_trailers const http2TrailerPrefix = "Trailer:" // promoteUndeclaredTrailers permits http.Handlers to set trailers // after the header has already been flushed. 
Because the Go // ResponseWriter interface has no way to set Trailers (only the // Header), and because we didn't want to expand the ResponseWriter // interface, and because nobody used trailers, and because RFC 2616 // says you SHOULD (but not must) predeclare any trailers in the // header, the official ResponseWriter rules said trailers in Go must // be predeclared, and then we reuse the same ResponseWriter.Header() // map to mean both Headers and Trailers. When it's time to write the // Trailers, we pick out the fields of Headers that were declared as // trailers. That worked for a while, until we found the first major // user of Trailers in the wild: gRPC (using them only over http2), // and gRPC libraries permit setting trailers mid-stream without // predeclarnig them. So: change of plans. We still permit the old // way, but we also permit this hack: if a Header() key begins with // "Trailer:", the suffix of that key is a Trailer. Because ':' is an // invalid token byte anyway, there is no ambiguity. (And it's already // filtered out) It's mildly hacky, but not terrible. // // This method runs after the Handler is done and promotes any Header // fields to be trailers. func (rws *http2responseWriterState) promoteUndeclaredTrailers() { for k, vv := range rws.handlerHeader { if !strings.HasPrefix(k, http2TrailerPrefix) { continue } trailerKey := strings.TrimPrefix(k, http2TrailerPrefix) rws.declareTrailer(trailerKey) rws.handlerHeader[CanonicalHeaderKey(trailerKey)] = vv } if len(rws.trailers) > 1 { sorter := http2sorterPool.Get().(*http2sorter) sorter.SortStrings(rws.trailers) http2sorterPool.Put(sorter) } } func (w *http2responseWriter) Flush() { rws := w.rws if rws == nil { panic("Header called after Handler finished") } if rws.bw.Buffered() > 0 { if err := rws.bw.Flush(); err != nil { return } } else { rws.writeChunk(nil) } } func (w *http2responseWriter) CloseNotify() <-chan bool { rws := w.rws if rws == nil { panic("CloseNotify called after Handler finished") } rws.closeNotifierMu.Lock() ch := rws.closeNotifierCh if ch == nil { ch = make(chan bool, 1) rws.closeNotifierCh = ch go func() { rws.stream.cw.Wait() ch <- true }() } rws.closeNotifierMu.Unlock() return ch } func (w *http2responseWriter) Header() Header { rws := w.rws if rws == nil { panic("Header called after Handler finished") } if rws.handlerHeader == nil { rws.handlerHeader = make(Header) } return rws.handlerHeader } func (w *http2responseWriter) WriteHeader(code int) { rws := w.rws if rws == nil { panic("WriteHeader called after Handler finished") } rws.writeHeader(code) } func (rws *http2responseWriterState) writeHeader(code int) { if !rws.wroteHeader { rws.wroteHeader = true rws.status = code if len(rws.handlerHeader) > 0 { rws.snapHeader = http2cloneHeader(rws.handlerHeader) } } } func http2cloneHeader(h Header) Header { h2 := make(Header, len(h)) for k, vv := range h { vv2 := make([]string, len(vv)) copy(vv2, vv) h2[k] = vv2 } return h2 } // The Life Of A Write is like this: // // * Handler calls w.Write or w.WriteString -> // * -> rws.bw (*bufio.Writer) -> // * (Handler migth call Flush) // * -> chunkWriter{rws} // * -> responseWriterState.writeChunk(p []byte) // * -> responseWriterState.writeChunk (most of the magic; see comment there) func (w *http2responseWriter) Write(p []byte) (n int, err error) { return w.write(len(p), p, "") } func (w *http2responseWriter) WriteString(s string) (n int, err error) { return w.write(len(s), nil, s) } // either dataB or dataS is non-zero. 
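// Illustrative sketch (not part of the original file): a handler using the
// "Trailer:" prefix described above to emit a trailer it did not declare
// before writing the response headers. The trailer name Grpc-Status is just
// an example; exampleUndeclaredTrailer is hypothetical.
func exampleUndeclaredTrailer(w ResponseWriter) {
	w.WriteHeader(200)
	io.WriteString(w, "body")
	// Setting this after the headers are flushed still works: the prefix is
	// stripped and the field is promoted by promoteUndeclaredTrailers when
	// the handler returns.
	w.Header().Set("Trailer:Grpc-Status", "0")
}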
func (w *http2responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) { rws := w.rws if rws == nil { panic("Write called after Handler finished") } if !rws.wroteHeader { w.WriteHeader(200) } if !http2bodyAllowedForStatus(rws.status) { return 0, ErrBodyNotAllowed } rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen { return 0, errors.New("http2: handler wrote more than declared Content-Length") } if dataB != nil { return rws.bw.Write(dataB) } else { return rws.bw.WriteString(dataS) } } func (w *http2responseWriter) handlerDone() { rws := w.rws rws.handlerDone = true w.Flush() w.rws = nil http2responseWriterStatePool.Put(rws) } // foreachHeaderElement splits v according to the "#rule" construction // in RFC 2616 section 2.1 and calls fn for each non-empty element. func http2foreachHeaderElement(v string, fn func(string)) { v = textproto.TrimString(v) if v == "" { return } if !strings.Contains(v, ",") { fn(v) return } for _, f := range strings.Split(v, ",") { if f = textproto.TrimString(f); f != "" { fn(f) } } } // From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2 var http2connHeaders = []string{ "Connection", "Keep-Alive", "Proxy-Connection", "Transfer-Encoding", "Upgrade", } // checkValidHTTP2Request checks whether req is a valid HTTP/2 request, // per RFC 7540 Section 8.1.2.2. // The returned error is reported to users. func http2checkValidHTTP2Request(req *Request) error { for _, h := range http2connHeaders { if _, ok := req.Header[h]; ok { return fmt.Errorf("request header %q is not valid in HTTP/2", h) } } te := req.Header["Te"] if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) { return errors.New(`request header "TE" may only be "trailers" in HTTP/2`) } return nil } func http2new400Handler(err error) HandlerFunc { return func(w ResponseWriter, r *Request) { Error(w, err.Error(), StatusBadRequest) } } // ValidTrailerHeader reports whether name is a valid header field name to appear // in trailers. // See: http://tools.ietf.org/html/rfc7230#section-4.1.2 func http2ValidTrailerHeader(name string) bool { name = CanonicalHeaderKey(name) if strings.HasPrefix(name, "If-") || http2badTrailer[name] { return false } return true } var http2badTrailer = map[string]bool{ "Authorization": true, "Cache-Control": true, "Connection": true, "Content-Encoding": true, "Content-Length": true, "Content-Range": true, "Content-Type": true, "Expect": true, "Host": true, "Keep-Alive": true, "Max-Forwards": true, "Pragma": true, "Proxy-Authenticate": true, "Proxy-Authorization": true, "Proxy-Connection": true, "Range": true, "Realm": true, "Te": true, "Trailer": true, "Transfer-Encoding": true, "Www-Authenticate": true, } const ( // transportDefaultConnFlow is how many connection-level flow control // tokens we give the server at start-up, past the default 64k. http2transportDefaultConnFlow = 1 << 30 // transportDefaultStreamFlow is how many stream-level flow // control tokens we announce to the peer, and how many bytes // we buffer per stream. http2transportDefaultStreamFlow = 4 << 20 // transportDefaultStreamMinRefresh is the minimum number of bytes we'll send // a stream-level WINDOW_UPDATE for at a time. http2transportDefaultStreamMinRefresh = 4 << 10 http2defaultUserAgent = "Go-http-client/2.0" ) // Transport is an HTTP/2 Transport. // // A Transport internally caches connections to servers. It is safe // for concurrent use by multiple goroutines. 
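// Illustrative sketch (not part of the original file): the request-header
// validation performed by http2checkValidHTTP2Request above.
// Connection-specific headers are invalid in HTTP/2, and "TE" may only be
// "trailers". The name exampleRejectedHeaders is hypothetical.
func exampleRejectedHeaders() []error {
	reqs := []*Request{
		{Header: Header{"Connection": {"keep-alive"}}}, // rejected
		{Header: Header{"Te": {"gzip"}}},               // rejected
		{Header: Header{"Te": {"trailers"}}},           // allowed
	}
	var errs []error
	for _, r := range reqs {
		if err := http2checkValidHTTP2Request(r); err != nil {
			errs = append(errs, err)
		}
	}
	return errs // two errors: one per rejected request
}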
type http2Transport struct { // DialTLS specifies an optional dial function for creating // TLS connections for requests. // // If DialTLS is nil, tls.Dial is used. // // If the returned net.Conn has a ConnectionState method like tls.Conn, // it will be used to set http.Response.TLS. DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error) // TLSClientConfig specifies the TLS configuration to use with // tls.Client. If nil, the default configuration is used. TLSClientConfig *tls.Config // ConnPool optionally specifies an alternate connection pool to use. // If nil, the default is used. ConnPool http2ClientConnPool // DisableCompression, if true, prevents the Transport from // requesting compression with an "Accept-Encoding: gzip" // request header when the Request contains no existing // Accept-Encoding value. If the Transport requests gzip on // its own and gets a gzipped response, it's transparently // decoded in the Response.Body. However, if the user // explicitly requested gzip it is not automatically // uncompressed. DisableCompression bool // AllowHTTP, if true, permits HTTP/2 requests using the insecure, // plain-text "http" scheme. Note that this does not enable h2c support. AllowHTTP bool // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to // send in the initial settings frame. It is how many bytes // of response headers are allow. Unlike the http2 spec, zero here // means to use a default limit (currently 10MB). If you actually // want to advertise an ulimited value to the peer, Transport // interprets the highest possible value here (0xffffffff or 1<<32-1) // to mean no limit. MaxHeaderListSize uint32 // t1, if non-nil, is the standard library Transport using // this transport. Its settings are used (but not its // RoundTrip method, etc). t1 *Transport connPoolOnce sync.Once connPoolOrDef http2ClientConnPool // non-nil version of ConnPool } func (t *http2Transport) maxHeaderListSize() uint32 { if t.MaxHeaderListSize == 0 { return 10 << 20 } if t.MaxHeaderListSize == 0xffffffff { return 0 } return t.MaxHeaderListSize } func (t *http2Transport) disableCompression() bool { return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) } var http2errTransportVersion = errors.New("http2: ConfigureTransport is only supported starting at Go 1.6") // ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. // It requires Go 1.6 or later and returns an error if the net/http package is too old // or if t1 has already been HTTP/2-enabled. func http2ConfigureTransport(t1 *Transport) error { _, err := http2configureTransport(t1) return err } func (t *http2Transport) connPool() http2ClientConnPool { t.connPoolOnce.Do(t.initConnPool) return t.connPoolOrDef } func (t *http2Transport) initConnPool() { if t.ConnPool != nil { t.connPoolOrDef = t.ConnPool } else { t.connPoolOrDef = &http2clientConnPool{t: t} } } // ClientConn is the state of a single HTTP/2 client connection to an // HTTP/2 server. 
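// Illustrative sketch (not part of the original file): using the bundled
// HTTP/2 transport as a client RoundTripper by hand. Most callers rely on
// http2ConfigureTransport (Go 1.6+) or the automatic upgrade instead; the
// field values below are arbitrary and exampleHTTP2Client is hypothetical.
func exampleHTTP2Client() *Client {
	t2 := &http2Transport{
		TLSClientConfig:   &tls.Config{MinVersion: tls.VersionTLS12},
		MaxHeaderListSize: 10 << 20, // accept up to ~10 MB of response headers
	}
	return &Client{Transport: t2}
}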
type http2ClientConn struct { t *http2Transport tconn net.Conn // usually *tls.Conn, except specialized impls tlsState *tls.ConnectionState // nil only for specialized impls singleUse bool // whether being used for a single http.Request // readLoop goroutine fields: readerDone chan struct{} // closed on error readerErr error // set before readerDone is closed mu sync.Mutex // guards following cond *sync.Cond // hold mu; broadcast on flow/closed changes flow http2flow // our conn-level flow control quota (cs.flow is per stream) inflow http2flow // peer's conn-level flow control closed bool wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back goAway *http2GoAwayFrame // if non-nil, the GoAwayFrame we received goAwayDebug string // goAway frame's debug data, retained as a string streams map[uint32]*http2clientStream // client-initiated nextStreamID uint32 bw *bufio.Writer br *bufio.Reader fr *http2Framer lastActive time.Time // Settings from peer: (also guarded by mu) maxFrameSize uint32 maxConcurrentStreams uint32 initialWindowSize uint32 hbuf bytes.Buffer // HPACK encoder writes into this henc *hpack.Encoder freeBuf [][]byte wmu sync.Mutex // held while writing; acquire AFTER mu if holding both werr error // first write error that has occurred } // clientStream is the state for a single HTTP/2 stream. One of these // is created for each Transport.RoundTrip call. type http2clientStream struct { cc *http2ClientConn req *Request trace *http2clientTrace // or nil ID uint32 resc chan http2resAndError bufPipe http2pipe // buffered pipe with the flow-controlled response payload requestedGzip bool on100 func() // optional code to run if get a 100 continue response flow http2flow // guarded by cc.mu inflow http2flow // guarded by cc.mu bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read readErr error // sticky read error; owned by transportResponseBody.Read stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu peerReset chan struct{} // closed on peer reset resetErr error // populated before peerReset is closed done chan struct{} // closed when stream remove from cc.streams map; close calls guarded by cc.mu // owned by clientConnReadLoop: firstByte bool // got the first response byte pastHeaders bool // got first MetaHeadersFrame (actual headers) pastTrailers bool // got optional second MetaHeadersFrame (trailers) trailer Header // accumulated trailers resTrailer *Header // client's Response.Trailer } // awaitRequestCancel runs in its own goroutine and waits for the user // to cancel a RoundTrip request, its context to expire, or for the // request to be done (any way it might be removed from the cc.streams // map: peer reset, successful completion, TCP connection breakage, // etc) func (cs *http2clientStream) awaitRequestCancel(req *Request) { ctx := http2reqContext(req) if req.Cancel == nil && ctx.Done() == nil { return } select { case <-req.Cancel: cs.bufPipe.CloseWithError(http2errRequestCanceled) cs.cc.writeStreamReset(cs.ID, http2ErrCodeCancel, nil) case <-ctx.Done(): cs.bufPipe.CloseWithError(ctx.Err()) cs.cc.writeStreamReset(cs.ID, http2ErrCodeCancel, nil) case <-cs.done: } } // checkResetOrDone reports any error sent in a RST_STREAM frame by the // server, or errStreamClosed if the stream is complete. 
func (cs *http2clientStream) checkResetOrDone() error { select { case <-cs.peerReset: return cs.resetErr case <-cs.done: return http2errStreamClosed default: return nil } } func (cs *http2clientStream) abortRequestBodyWrite(err error) { if err == nil { panic("nil error") } cc := cs.cc cc.mu.Lock() cs.stopReqBody = err cc.cond.Broadcast() cc.mu.Unlock() } type http2stickyErrWriter struct { w io.Writer err *error } func (sew http2stickyErrWriter) Write(p []byte) (n int, err error) { if *sew.err != nil { return 0, *sew.err } n, err = sew.w.Write(p) *sew.err = err return } var http2ErrNoCachedConn = errors.New("http2: no cached connection was available") // RoundTripOpt are options for the Transport.RoundTripOpt method. type http2RoundTripOpt struct { // OnlyCachedConn controls whether RoundTripOpt may // create a new TCP connection. If set true and // no cached connection is available, RoundTripOpt // will return ErrNoCachedConn. OnlyCachedConn bool } func (t *http2Transport) RoundTrip(req *Request) (*Response, error) { return t.RoundTripOpt(req, http2RoundTripOpt{}) } // authorityAddr returns a given authority (a host/IP, or host:port / ip:port) // and returns a host:port. The port 443 is added if needed. func http2authorityAddr(scheme string, authority string) (addr string) { host, port, err := net.SplitHostPort(authority) if err != nil { port = "443" if scheme == "http" { port = "80" } host = authority } if a, err := idna.ToASCII(host); err == nil { host = a } return net.JoinHostPort(host, port) } // RoundTripOpt is like RoundTrip, but takes options. func (t *http2Transport) RoundTripOpt(req *Request, opt http2RoundTripOpt) (*Response, error) { if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { return nil, errors.New("http2: unsupported scheme") } addr := http2authorityAddr(req.URL.Scheme, req.URL.Host) for { cc, err := t.connPool().GetClientConn(req, addr) if err != nil { t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) return nil, err } http2traceGotConn(req, cc) res, err := cc.RoundTrip(req) if http2shouldRetryRequest(req, err) { continue } if err != nil { t.vlogf("RoundTrip failure: %v", err) return nil, err } return res, nil } } // CloseIdleConnections closes any connections which were previously // connected from previous requests but are now sitting idle. // It does not interrupt any connections currently in use. func (t *http2Transport) CloseIdleConnections() { if cp, ok := t.connPool().(http2clientConnPoolIdleCloser); ok { cp.closeIdleConnections() } } var ( http2errClientConnClosed = errors.New("http2: client conn is closed") http2errClientConnUnusable = errors.New("http2: client conn not usable") ) func http2shouldRetryRequest(req *Request, err error) bool { return err == http2errClientConnUnusable } func (t *http2Transport) dialClientConn(addr string, singleUse bool) (*http2ClientConn, error) { host, _, err := net.SplitHostPort(addr) if err != nil { return nil, err } tconn, err := t.dialTLS()("tcp", addr, t.newTLSConfig(host)) if err != nil { return nil, err } return t.newClientConn(tconn, singleUse) } func (t *http2Transport) newTLSConfig(host string) *tls.Config { cfg := new(tls.Config) if t.TLSClientConfig != nil { *cfg = *http2cloneTLSConfig(t.TLSClientConfig) } if !http2strSliceContains(cfg.NextProtos, http2NextProtoTLS) { cfg.NextProtos = append([]string{http2NextProtoTLS}, cfg.NextProtos...) 
} if cfg.ServerName == "" { cfg.ServerName = host } return cfg } func (t *http2Transport) dialTLS() func(string, string, *tls.Config) (net.Conn, error) { if t.DialTLS != nil { return t.DialTLS } return t.dialTLSDefault } func (t *http2Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.Conn, error) { cn, err := tls.Dial(network, addr, cfg) if err != nil { return nil, err } if err := cn.Handshake(); err != nil { return nil, err } if !cfg.InsecureSkipVerify { if err := cn.VerifyHostname(cfg.ServerName); err != nil { return nil, err } } state := cn.ConnectionState() if p := state.NegotiatedProtocol; p != http2NextProtoTLS { return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, http2NextProtoTLS) } if !state.NegotiatedProtocolIsMutual { return nil, errors.New("http2: could not negotiate protocol mutually") } return cn, nil } // disableKeepAlives reports whether connections should be closed as // soon as possible after handling the first request. func (t *http2Transport) disableKeepAlives() bool { return t.t1 != nil && t.t1.DisableKeepAlives } func (t *http2Transport) expectContinueTimeout() time.Duration { if t.t1 == nil { return 0 } return http2transportExpectContinueTimeout(t.t1) } func (t *http2Transport) NewClientConn(c net.Conn) (*http2ClientConn, error) { return t.newClientConn(c, false) } func (t *http2Transport) newClientConn(c net.Conn, singleUse bool) (*http2ClientConn, error) { cc := &http2ClientConn{ t: t, tconn: c, readerDone: make(chan struct{}), nextStreamID: 1, maxFrameSize: 16 << 10, initialWindowSize: 65535, maxConcurrentStreams: 1000, streams: make(map[uint32]*http2clientStream), singleUse: singleUse, wantSettingsAck: true, } if http2VerboseLogs { t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) } cc.cond = sync.NewCond(&cc.mu) cc.flow.add(int32(http2initialWindowSize)) cc.bw = bufio.NewWriter(http2stickyErrWriter{c, &cc.werr}) cc.br = bufio.NewReader(c) cc.fr = http2NewFramer(cc.bw, cc.br) cc.fr.ReadMetaHeaders = hpack.NewDecoder(http2initialHeaderTableSize, nil) cc.fr.MaxHeaderListSize = t.maxHeaderListSize() cc.henc = hpack.NewEncoder(&cc.hbuf) if cs, ok := c.(http2connectionStater); ok { state := cs.ConnectionState() cc.tlsState = &state } initialSettings := []http2Setting{ {ID: http2SettingEnablePush, Val: 0}, {ID: http2SettingInitialWindowSize, Val: http2transportDefaultStreamFlow}, } if max := t.maxHeaderListSize(); max != 0 { initialSettings = append(initialSettings, http2Setting{ID: http2SettingMaxHeaderListSize, Val: max}) } cc.bw.Write(http2clientPreface) cc.fr.WriteSettings(initialSettings...) 
cc.fr.WriteWindowUpdate(0, http2transportDefaultConnFlow) cc.inflow.add(http2transportDefaultConnFlow + http2initialWindowSize) cc.bw.Flush() if cc.werr != nil { return nil, cc.werr } go cc.readLoop() return cc, nil } func (cc *http2ClientConn) setGoAway(f *http2GoAwayFrame) { cc.mu.Lock() defer cc.mu.Unlock() old := cc.goAway cc.goAway = f if cc.goAwayDebug == "" { cc.goAwayDebug = string(f.DebugData()) } if old != nil && old.ErrCode != http2ErrCodeNo { cc.goAway.ErrCode = old.ErrCode } } func (cc *http2ClientConn) CanTakeNewRequest() bool { cc.mu.Lock() defer cc.mu.Unlock() return cc.canTakeNewRequestLocked() } func (cc *http2ClientConn) canTakeNewRequestLocked() bool { if cc.singleUse && cc.nextStreamID > 1 { return false } return cc.goAway == nil && !cc.closed && int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) && cc.nextStreamID < math.MaxInt32 } func (cc *http2ClientConn) closeIfIdle() { cc.mu.Lock() if len(cc.streams) > 0 { cc.mu.Unlock() return } cc.closed = true nextID := cc.nextStreamID cc.mu.Unlock() if http2VerboseLogs { cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2) } cc.tconn.Close() } const http2maxAllocFrameSize = 512 << 10 // frameBuffer returns a scratch buffer suitable for writing DATA frames. // They're capped at the min of the peer's max frame size or 512KB // (kinda arbitrarily), but definitely capped so we don't allocate 4GB // bufers. func (cc *http2ClientConn) frameScratchBuffer() []byte { cc.mu.Lock() size := cc.maxFrameSize if size > http2maxAllocFrameSize { size = http2maxAllocFrameSize } for i, buf := range cc.freeBuf { if len(buf) >= int(size) { cc.freeBuf[i] = nil cc.mu.Unlock() return buf[:size] } } cc.mu.Unlock() return make([]byte, size) } func (cc *http2ClientConn) putFrameScratchBuffer(buf []byte) { cc.mu.Lock() defer cc.mu.Unlock() const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate. if len(cc.freeBuf) < maxBufs { cc.freeBuf = append(cc.freeBuf, buf) return } for i, old := range cc.freeBuf { if old == nil { cc.freeBuf[i] = buf return } } } // errRequestCanceled is a copy of net/http's errRequestCanceled because it's not // exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests. var http2errRequestCanceled = errors.New("net/http: request canceled") func http2commaSeparatedTrailers(req *Request) (string, error) { keys := make([]string, 0, len(req.Trailer)) for k := range req.Trailer { k = CanonicalHeaderKey(k) switch k { case "Transfer-Encoding", "Trailer", "Content-Length": return "", &http2badStringError{"invalid Trailer key", k} } keys = append(keys, k) } if len(keys) > 0 { sort.Strings(keys) return strings.Join(keys, ","), nil } return "", nil } func (cc *http2ClientConn) responseHeaderTimeout() time.Duration { if cc.t.t1 != nil { return cc.t.t1.ResponseHeaderTimeout } return 0 } // checkConnHeaders checks whether req has any invalid connection-level headers. // per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields. // Certain headers are special-cased as okay but not transmitted later. 
func http2checkConnHeaders(req *Request) error { if v := req.Header.Get("Upgrade"); v != "" { return errors.New("http2: invalid Upgrade request header") } if v := req.Header.Get("Transfer-Encoding"); (v != "" && v != "chunked") || len(req.Header["Transfer-Encoding"]) > 1 { return errors.New("http2: invalid Transfer-Encoding request header") } if v := req.Header.Get("Connection"); (v != "" && v != "close" && v != "keep-alive") || len(req.Header["Connection"]) > 1 { return errors.New("http2: invalid Connection request header") } return nil } func http2bodyAndLength(req *Request) (body io.Reader, contentLen int64) { body = req.Body if body == nil { return nil, 0 } if req.ContentLength != 0 { return req.Body, req.ContentLength } // We have a body but a zero content length. Test to see if // it's actually zero or just unset. var buf [1]byte n, rerr := body.Read(buf[:]) if rerr != nil && rerr != io.EOF { return http2errorReader{rerr}, -1 } if n == 1 { if rerr == io.EOF { return bytes.NewReader(buf[:]), 1 } return io.MultiReader(bytes.NewReader(buf[:]), body), -1 } return nil, 0 } func (cc *http2ClientConn) RoundTrip(req *Request) (*Response, error) { if err := http2checkConnHeaders(req); err != nil { return nil, err } trailers, err := http2commaSeparatedTrailers(req) if err != nil { return nil, err } hasTrailers := trailers != "" cc.mu.Lock() cc.lastActive = time.Now() if cc.closed || !cc.canTakeNewRequestLocked() { cc.mu.Unlock() return nil, http2errClientConnUnusable } body, contentLen := http2bodyAndLength(req) hasBody := body != nil // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? var requestedGzip bool if !cc.t.disableCompression() && req.Header.Get("Accept-Encoding") == "" && req.Header.Get("Range") == "" && req.Method != "HEAD" { requestedGzip = true } hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen) if err != nil { cc.mu.Unlock() return nil, err } cs := cc.newStream() cs.req = req cs.trace = http2requestTrace(req) cs.requestedGzip = requestedGzip bodyWriter := cc.t.getBodyWriterState(cs, body) cs.on100 = bodyWriter.on100 cc.wmu.Lock() endStream := !hasBody && !hasTrailers werr := cc.writeHeaders(cs.ID, endStream, hdrs) cc.wmu.Unlock() http2traceWroteHeaders(cs.trace) cc.mu.Unlock() if werr != nil { if hasBody { req.Body.Close() bodyWriter.cancel() } cc.forgetStreamID(cs.ID) http2traceWroteRequest(cs.trace, werr) return nil, werr } var respHeaderTimer <-chan time.Time if hasBody { bodyWriter.scheduleBodyWrite() } else { http2traceWroteRequest(cs.trace, nil) if d := cc.responseHeaderTimeout(); d != 0 { timer := time.NewTimer(d) defer timer.Stop() respHeaderTimer = timer.C } } readLoopResCh := cs.resc bodyWritten := false ctx := http2reqContext(req) handleReadLoopResponse := func(re http2resAndError) (*Response, error) { res := re.res if re.err != nil || res.StatusCode > 299 { bodyWriter.cancel() cs.abortRequestBodyWrite(http2errStopReqBodyWrite) } if re.err != nil { cc.forgetStreamID(cs.ID) return nil, re.err } res.Request = req res.TLS = cc.tlsState return res, nil } for { select { case re := <-readLoopResCh: return handleReadLoopResponse(re) case <-respHeaderTimer: cc.forgetStreamID(cs.ID) if !hasBody || bodyWritten { cc.writeStreamReset(cs.ID, http2ErrCodeCancel, nil) } else { bodyWriter.cancel() cs.abortRequestBodyWrite(http2errStopReqBodyWriteAndCancel) } return nil, http2errTimeout case <-ctx.Done(): cc.forgetStreamID(cs.ID) if !hasBody || bodyWritten { cc.writeStreamReset(cs.ID, http2ErrCodeCancel, nil) } else { bodyWriter.cancel() 
cs.abortRequestBodyWrite(http2errStopReqBodyWriteAndCancel) } return nil, ctx.Err() case <-req.Cancel: cc.forgetStreamID(cs.ID) if !hasBody || bodyWritten { cc.writeStreamReset(cs.ID, http2ErrCodeCancel, nil) } else { bodyWriter.cancel() cs.abortRequestBodyWrite(http2errStopReqBodyWriteAndCancel) } return nil, http2errRequestCanceled case <-cs.peerReset: return nil, cs.resetErr case err := <-bodyWriter.resc: select { case re := <-readLoopResCh: return handleReadLoopResponse(re) default: } if err != nil { return nil, err } bodyWritten = true if d := cc.responseHeaderTimeout(); d != 0 { timer := time.NewTimer(d) defer timer.Stop() respHeaderTimer = timer.C } } } } // requires cc.wmu be held func (cc *http2ClientConn) writeHeaders(streamID uint32, endStream bool, hdrs []byte) error { first := true frameSize := int(cc.maxFrameSize) for len(hdrs) > 0 && cc.werr == nil { chunk := hdrs if len(chunk) > frameSize { chunk = chunk[:frameSize] } hdrs = hdrs[len(chunk):] endHeaders := len(hdrs) == 0 if first { cc.fr.WriteHeaders(http2HeadersFrameParam{ StreamID: streamID, BlockFragment: chunk, EndStream: endStream, EndHeaders: endHeaders, }) first = false } else { cc.fr.WriteContinuation(streamID, endHeaders, chunk) } } cc.bw.Flush() return cc.werr } // internal error values; they don't escape to callers var ( // abort request body write; don't send cancel http2errStopReqBodyWrite = errors.New("http2: aborting request body write") // abort request body write, but send stream reset of cancel. http2errStopReqBodyWriteAndCancel = errors.New("http2: canceling request") ) func (cs *http2clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) { cc := cs.cc sentEnd := false buf := cc.frameScratchBuffer() defer cc.putFrameScratchBuffer(buf) defer func() { http2traceWroteRequest(cs.trace, err) cerr := bodyCloser.Close() if err == nil { err = cerr } }() req := cs.req hasTrailers := req.Trailer != nil var sawEOF bool for !sawEOF { n, err := body.Read(buf) if err == io.EOF { sawEOF = true err = nil } else if err != nil { return err } remain := buf[:n] for len(remain) > 0 && err == nil { var allowed int32 allowed, err = cs.awaitFlowControl(len(remain)) switch { case err == http2errStopReqBodyWrite: return err case err == http2errStopReqBodyWriteAndCancel: cc.writeStreamReset(cs.ID, http2ErrCodeCancel, nil) return err case err != nil: return err } cc.wmu.Lock() data := remain[:allowed] remain = remain[allowed:] sentEnd = sawEOF && len(remain) == 0 && !hasTrailers err = cc.fr.WriteData(cs.ID, sentEnd, data) if err == nil { err = cc.bw.Flush() } cc.wmu.Unlock() } if err != nil { return err } } if sentEnd { return nil } var trls []byte if hasTrailers { cc.mu.Lock() defer cc.mu.Unlock() trls = cc.encodeTrailers(req) } cc.wmu.Lock() defer cc.wmu.Unlock() if len(trls) > 0 { err = cc.writeHeaders(cs.ID, true, trls) } else { err = cc.fr.WriteData(cs.ID, true, nil) } if ferr := cc.bw.Flush(); ferr != nil && err == nil { err = ferr } return err } // awaitFlowControl waits for [1, min(maxBytes, cc.cs.maxFrameSize)] flow // control tokens from the server. // It returns either the non-zero number of tokens taken or an error // if the stream is dead. 
func (cs *http2clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) { cc := cs.cc cc.mu.Lock() defer cc.mu.Unlock() for { if cc.closed { return 0, http2errClientConnClosed } if cs.stopReqBody != nil { return 0, cs.stopReqBody } if err := cs.checkResetOrDone(); err != nil { return 0, err } if a := cs.flow.available(); a > 0 { take := a if int(take) > maxBytes { take = int32(maxBytes) } if take > int32(cc.maxFrameSize) { take = int32(cc.maxFrameSize) } cs.flow.take(take) return take, nil } cc.cond.Wait() } } type http2badStringError struct { what string str string } func (e *http2badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) } // requires cc.mu be held. func (cc *http2ClientConn) encodeHeaders(req *Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { cc.hbuf.Reset() host := req.Host if host == "" { host = req.URL.Host } host, err := httplex.PunycodeHostPort(host) if err != nil { return nil, err } var path string if req.Method != "CONNECT" { path = req.URL.RequestURI() if !http2validPseudoPath(path) { orig := path path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) if !http2validPseudoPath(path) { if req.URL.Opaque != "" { return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) } else { return nil, fmt.Errorf("invalid request :path %q", orig) } } } } for k, vv := range req.Header { if !httplex.ValidHeaderFieldName(k) { return nil, fmt.Errorf("invalid HTTP header name %q", k) } for _, v := range vv { if !httplex.ValidHeaderFieldValue(v) { return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k) } } } cc.writeHeader(":authority", host) cc.writeHeader(":method", req.Method) if req.Method != "CONNECT" { cc.writeHeader(":path", path) cc.writeHeader(":scheme", "https") } if trailers != "" { cc.writeHeader("trailer", trailers) } var didUA bool for k, vv := range req.Header { lowKey := strings.ToLower(k) switch lowKey { case "host", "content-length": continue case "connection", "proxy-connection", "transfer-encoding", "upgrade", "keep-alive": continue case "user-agent": didUA = true if len(vv) < 1 { continue } vv = vv[:1] if vv[0] == "" { continue } } for _, v := range vv { cc.writeHeader(lowKey, v) } } if http2shouldSendReqContentLength(req.Method, contentLength) { cc.writeHeader("content-length", strconv.FormatInt(contentLength, 10)) } if addGzipHeader { cc.writeHeader("accept-encoding", "gzip") } if !didUA { cc.writeHeader("user-agent", http2defaultUserAgent) } return cc.hbuf.Bytes(), nil } // shouldSendReqContentLength reports whether the http2.Transport should send // a "content-length" request header. This logic is basically a copy of the net/http // transferWriter.shouldSendContentLength. // The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). // -1 means unknown. func http2shouldSendReqContentLength(method string, contentLength int64) bool { if contentLength > 0 { return true } if contentLength < 0 { return false } switch method { case "POST", "PUT", "PATCH": return true default: return false } } // requires cc.mu be held. 
func (cc *http2ClientConn) encodeTrailers(req *Request) []byte { cc.hbuf.Reset() for k, vv := range req.Trailer { lowKey := strings.ToLower(k) for _, v := range vv { cc.writeHeader(lowKey, v) } } return cc.hbuf.Bytes() } func (cc *http2ClientConn) writeHeader(name, value string) { if http2VerboseLogs { log.Printf("http2: Transport encoding header %q = %q", name, value) } cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) } type http2resAndError struct { res *Response err error } // requires cc.mu be held. func (cc *http2ClientConn) newStream() *http2clientStream { cs := &http2clientStream{ cc: cc, ID: cc.nextStreamID, resc: make(chan http2resAndError, 1), peerReset: make(chan struct{}), done: make(chan struct{}), } cs.flow.add(int32(cc.initialWindowSize)) cs.flow.setConnFlow(&cc.flow) cs.inflow.add(http2transportDefaultStreamFlow) cs.inflow.setConnFlow(&cc.inflow) cc.nextStreamID += 2 cc.streams[cs.ID] = cs return cs } func (cc *http2ClientConn) forgetStreamID(id uint32) { cc.streamByID(id, true) } func (cc *http2ClientConn) streamByID(id uint32, andRemove bool) *http2clientStream { cc.mu.Lock() defer cc.mu.Unlock() cs := cc.streams[id] if andRemove && cs != nil && !cc.closed { cc.lastActive = time.Now() delete(cc.streams, id) close(cs.done) cc.cond.Broadcast() } return cs } // clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop. type http2clientConnReadLoop struct { cc *http2ClientConn activeRes map[uint32]*http2clientStream // keyed by streamID closeWhenIdle bool } // readLoop runs in its own goroutine and reads and dispatches frames. func (cc *http2ClientConn) readLoop() { rl := &http2clientConnReadLoop{ cc: cc, activeRes: make(map[uint32]*http2clientStream), } defer rl.cleanup() cc.readerErr = rl.run() if ce, ok := cc.readerErr.(http2ConnectionError); ok { cc.wmu.Lock() cc.fr.WriteGoAway(0, http2ErrCode(ce), nil) cc.wmu.Unlock() } } // GoAwayError is returned by the Transport when the server closes the // TCP connection after sending a GOAWAY frame. 
type http2GoAwayError struct { LastStreamID uint32 ErrCode http2ErrCode DebugData string } func (e http2GoAwayError) Error() string { return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q", e.LastStreamID, e.ErrCode, e.DebugData) } func http2isEOFOrNetReadError(err error) bool { if err == io.EOF { return true } ne, ok := err.(*net.OpError) return ok && ne.Op == "read" } func (rl *http2clientConnReadLoop) cleanup() { cc := rl.cc defer cc.tconn.Close() defer cc.t.connPool().MarkDead(cc) defer close(cc.readerDone) err := cc.readerErr cc.mu.Lock() if cc.goAway != nil && http2isEOFOrNetReadError(err) { err = http2GoAwayError{ LastStreamID: cc.goAway.LastStreamID, ErrCode: cc.goAway.ErrCode, DebugData: cc.goAwayDebug, } } else if err == io.EOF { err = io.ErrUnexpectedEOF } for _, cs := range rl.activeRes { cs.bufPipe.CloseWithError(err) } for _, cs := range cc.streams { select { case cs.resc <- http2resAndError{err: err}: default: } close(cs.done) } cc.closed = true cc.cond.Broadcast() cc.mu.Unlock() } func (rl *http2clientConnReadLoop) run() error { cc := rl.cc rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse gotReply := false gotSettings := false for { f, err := cc.fr.ReadFrame() if err != nil { cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) } if se, ok := err.(http2StreamError); ok { if cs := cc.streamByID(se.StreamID, true); cs != nil { cs.cc.writeStreamReset(cs.ID, se.Code, err) if se.Cause == nil { se.Cause = cc.fr.errDetail } rl.endStreamError(cs, se) } continue } else if err != nil { return err } if http2VerboseLogs { cc.vlogf("http2: Transport received %s", http2summarizeFrame(f)) } if !gotSettings { if _, ok := f.(*http2SettingsFrame); !ok { cc.logf("protocol error: received %T before a SETTINGS frame", f) return http2ConnectionError(http2ErrCodeProtocol) } gotSettings = true } maybeIdle := false switch f := f.(type) { case *http2MetaHeadersFrame: err = rl.processHeaders(f) maybeIdle = true gotReply = true case *http2DataFrame: err = rl.processData(f) maybeIdle = true case *http2GoAwayFrame: err = rl.processGoAway(f) maybeIdle = true case *http2RSTStreamFrame: err = rl.processResetStream(f) maybeIdle = true case *http2SettingsFrame: err = rl.processSettings(f) case *http2PushPromiseFrame: err = rl.processPushPromise(f) case *http2WindowUpdateFrame: err = rl.processWindowUpdate(f) case *http2PingFrame: err = rl.processPing(f) default: cc.logf("Transport: unhandled response frame type %T", f) } if err != nil { if http2VerboseLogs { cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, http2summarizeFrame(f), err) } return err } if rl.closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 0 { cc.closeIfIdle() } } } func (rl *http2clientConnReadLoop) processHeaders(f *http2MetaHeadersFrame) error { cc := rl.cc cs := cc.streamByID(f.StreamID, f.StreamEnded()) if cs == nil { return nil } if !cs.firstByte { if cs.trace != nil { http2traceFirstResponseByte(cs.trace) } cs.firstByte = true } if !cs.pastHeaders { cs.pastHeaders = true } else { return rl.processTrailers(cs, f) } res, err := rl.handleResponse(cs, f) if err != nil { if _, ok := err.(http2ConnectionError); ok { return err } cs.cc.writeStreamReset(f.StreamID, http2ErrCodeProtocol, err) cs.resc <- http2resAndError{err: err} return nil } if res == nil { return nil } if res.Body != http2noBody { rl.activeRes[cs.ID] = cs } cs.resTrailer = &res.Trailer cs.resc <- http2resAndError{res: res} return nil } 
// may return error types nil, or ConnectionError. Any other error value // is a StreamError of type ErrCodeProtocol. The returned error in that case // is the detail. // // As a special case, handleResponse may return (nil, nil) to skip the // frame (currently only used for 100 expect continue). This special // case is going away after Issue 13851 is fixed. func (rl *http2clientConnReadLoop) handleResponse(cs *http2clientStream, f *http2MetaHeadersFrame) (*Response, error) { if f.Truncated { return nil, http2errResponseHeaderListSize } status := f.PseudoValue("status") if status == "" { return nil, errors.New("missing status pseudo header") } statusCode, err := strconv.Atoi(status) if err != nil { return nil, errors.New("malformed non-numeric status pseudo header") } if statusCode == 100 { http2traceGot100Continue(cs.trace) if cs.on100 != nil { cs.on100() } cs.pastHeaders = false return nil, nil } header := make(Header) res := &Response{ Proto: "HTTP/2.0", ProtoMajor: 2, Header: header, StatusCode: statusCode, Status: status + " " + StatusText(statusCode), } for _, hf := range f.RegularFields() { key := CanonicalHeaderKey(hf.Name) if key == "Trailer" { t := res.Trailer if t == nil { t = make(Header) res.Trailer = t } http2foreachHeaderElement(hf.Value, func(v string) { t[CanonicalHeaderKey(v)] = nil }) } else { header[key] = append(header[key], hf.Value) } } streamEnded := f.StreamEnded() isHead := cs.req.Method == "HEAD" if !streamEnded || isHead { res.ContentLength = -1 if clens := res.Header["Content-Length"]; len(clens) == 1 { if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil { res.ContentLength = clen64 } else { } } else if len(clens) > 1 { } } if streamEnded || isHead { res.Body = http2noBody return res, nil } buf := new(bytes.Buffer) cs.bufPipe = http2pipe{b: buf} cs.bytesRemain = res.ContentLength res.Body = http2transportResponseBody{cs} go cs.awaitRequestCancel(cs.req) if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" { res.Header.Del("Content-Encoding") res.Header.Del("Content-Length") res.ContentLength = -1 res.Body = &http2gzipReader{body: res.Body} http2setResponseUncompressed(res) } return res, nil } func (rl *http2clientConnReadLoop) processTrailers(cs *http2clientStream, f *http2MetaHeadersFrame) error { if cs.pastTrailers { return http2ConnectionError(http2ErrCodeProtocol) } cs.pastTrailers = true if !f.StreamEnded() { return http2ConnectionError(http2ErrCodeProtocol) } if len(f.PseudoFields()) > 0 { return http2ConnectionError(http2ErrCodeProtocol) } trailer := make(Header) for _, hf := range f.RegularFields() { key := CanonicalHeaderKey(hf.Name) trailer[key] = append(trailer[key], hf.Value) } cs.trailer = trailer rl.endStream(cs) return nil } // transportResponseBody is the concrete type of Transport.RoundTrip's // Response.Body. It is an io.ReadCloser. On Read, it reads from cs.body. // On Close it sends RST_STREAM if EOF wasn't already seen. 
type http2transportResponseBody struct { cs *http2clientStream } func (b http2transportResponseBody) Read(p []byte) (n int, err error) { cs := b.cs cc := cs.cc if cs.readErr != nil { return 0, cs.readErr } n, err = b.cs.bufPipe.Read(p) if cs.bytesRemain != -1 { if int64(n) > cs.bytesRemain { n = int(cs.bytesRemain) if err == nil { err = errors.New("net/http: server replied with more than declared Content-Length; truncated") cc.writeStreamReset(cs.ID, http2ErrCodeProtocol, err) } cs.readErr = err return int(cs.bytesRemain), err } cs.bytesRemain -= int64(n) if err == io.EOF && cs.bytesRemain > 0 { err = io.ErrUnexpectedEOF cs.readErr = err return n, err } } if n == 0 { return } cc.mu.Lock() defer cc.mu.Unlock() var connAdd, streamAdd int32 if v := cc.inflow.available(); v < http2transportDefaultConnFlow/2 { connAdd = http2transportDefaultConnFlow - v cc.inflow.add(connAdd) } if err == nil { v := int(cs.inflow.available()) + cs.bufPipe.Len() if v < http2transportDefaultStreamFlow-http2transportDefaultStreamMinRefresh { streamAdd = int32(http2transportDefaultStreamFlow - v) cs.inflow.add(streamAdd) } } if connAdd != 0 || streamAdd != 0 { cc.wmu.Lock() defer cc.wmu.Unlock() if connAdd != 0 { cc.fr.WriteWindowUpdate(0, http2mustUint31(connAdd)) } if streamAdd != 0 { cc.fr.WriteWindowUpdate(cs.ID, http2mustUint31(streamAdd)) } cc.bw.Flush() } return } var http2errClosedResponseBody = errors.New("http2: response body closed") func (b http2transportResponseBody) Close() error { cs := b.cs cc := cs.cc serverSentStreamEnd := cs.bufPipe.Err() == io.EOF unread := cs.bufPipe.Len() if unread > 0 || !serverSentStreamEnd { cc.mu.Lock() cc.wmu.Lock() if !serverSentStreamEnd { cc.fr.WriteRSTStream(cs.ID, http2ErrCodeCancel) } if unread > 0 { cc.inflow.add(int32(unread)) cc.fr.WriteWindowUpdate(0, uint32(unread)) } cc.bw.Flush() cc.wmu.Unlock() cc.mu.Unlock() } cs.bufPipe.BreakWithError(http2errClosedResponseBody) return nil } func (rl *http2clientConnReadLoop) processData(f *http2DataFrame) error { cc := rl.cc cs := cc.streamByID(f.StreamID, f.StreamEnded()) data := f.Data() if cs == nil { cc.mu.Lock() neverSent := cc.nextStreamID cc.mu.Unlock() if f.StreamID >= neverSent { cc.logf("http2: Transport received unsolicited DATA frame; closing connection") return http2ConnectionError(http2ErrCodeProtocol) } if f.Length > 0 { cc.mu.Lock() cc.inflow.add(int32(f.Length)) cc.mu.Unlock() cc.wmu.Lock() cc.fr.WriteWindowUpdate(0, uint32(f.Length)) cc.bw.Flush() cc.wmu.Unlock() } return nil } if f.Length > 0 { if len(data) > 0 && cs.bufPipe.b == nil { cc.logf("http2: Transport received DATA frame for closed stream; closing connection") return http2ConnectionError(http2ErrCodeProtocol) } cc.mu.Lock() if cs.inflow.available() >= int32(f.Length) { cs.inflow.take(int32(f.Length)) } else { cc.mu.Unlock() return http2ConnectionError(http2ErrCodeFlowControl) } if pad := int32(f.Length) - int32(len(data)); pad > 0 { cs.inflow.add(pad) cc.inflow.add(pad) cc.wmu.Lock() cc.fr.WriteWindowUpdate(0, uint32(pad)) cc.fr.WriteWindowUpdate(cs.ID, uint32(pad)) cc.bw.Flush() cc.wmu.Unlock() } cc.mu.Unlock() if len(data) > 0 { if _, err := cs.bufPipe.Write(data); err != nil { rl.endStreamError(cs, err) return err } } } if f.StreamEnded() { rl.endStream(cs) } return nil } var http2errInvalidTrailers = errors.New("http2: invalid trailers") func (rl *http2clientConnReadLoop) endStream(cs *http2clientStream) { rl.endStreamError(cs, nil) } func (rl *http2clientConnReadLoop) endStreamError(cs *http2clientStream, err error) { var code func() if 
err == nil { err = io.EOF code = cs.copyTrailers } cs.bufPipe.closeWithErrorAndCode(err, code) delete(rl.activeRes, cs.ID) if http2isConnectionCloseRequest(cs.req) { rl.closeWhenIdle = true } select { case cs.resc <- http2resAndError{err: err}: default: } } func (cs *http2clientStream) copyTrailers() { for k, vv := range cs.trailer { t := cs.resTrailer if *t == nil { *t = make(Header) } (*t)[k] = vv } } func (rl *http2clientConnReadLoop) processGoAway(f *http2GoAwayFrame) error { cc := rl.cc cc.t.connPool().MarkDead(cc) if f.ErrCode != 0 { cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode) } cc.setGoAway(f) return nil } func (rl *http2clientConnReadLoop) processSettings(f *http2SettingsFrame) error { cc := rl.cc cc.mu.Lock() defer cc.mu.Unlock() if f.IsAck() { if cc.wantSettingsAck { cc.wantSettingsAck = false return nil } return http2ConnectionError(http2ErrCodeProtocol) } err := f.ForeachSetting(func(s http2Setting) error { switch s.ID { case http2SettingMaxFrameSize: cc.maxFrameSize = s.Val case http2SettingMaxConcurrentStreams: cc.maxConcurrentStreams = s.Val case http2SettingInitialWindowSize: if s.Val > math.MaxInt32 { return http2ConnectionError(http2ErrCodeFlowControl) } delta := int32(s.Val) - int32(cc.initialWindowSize) for _, cs := range cc.streams { cs.flow.add(delta) } cc.cond.Broadcast() cc.initialWindowSize = s.Val default: cc.vlogf("Unhandled Setting: %v", s) } return nil }) if err != nil { return err } cc.wmu.Lock() defer cc.wmu.Unlock() cc.fr.WriteSettingsAck() cc.bw.Flush() return cc.werr } func (rl *http2clientConnReadLoop) processWindowUpdate(f *http2WindowUpdateFrame) error { cc := rl.cc cs := cc.streamByID(f.StreamID, false) if f.StreamID != 0 && cs == nil { return nil } cc.mu.Lock() defer cc.mu.Unlock() fl := &cc.flow if cs != nil { fl = &cs.flow } if !fl.add(int32(f.Increment)) { return http2ConnectionError(http2ErrCodeFlowControl) } cc.cond.Broadcast() return nil } func (rl *http2clientConnReadLoop) processResetStream(f *http2RSTStreamFrame) error { cs := rl.cc.streamByID(f.StreamID, true) if cs == nil { return nil } select { case <-cs.peerReset: default: err := http2streamError(cs.ID, f.ErrCode) cs.resetErr = err close(cs.peerReset) cs.bufPipe.CloseWithError(err) cs.cc.cond.Broadcast() } delete(rl.activeRes, cs.ID) return nil } func (rl *http2clientConnReadLoop) processPing(f *http2PingFrame) error { if f.IsAck() { return nil } cc := rl.cc cc.wmu.Lock() defer cc.wmu.Unlock() if err := cc.fr.WritePing(true, f.Data); err != nil { return err } return cc.bw.Flush() } func (rl *http2clientConnReadLoop) processPushPromise(f *http2PushPromiseFrame) error { return http2ConnectionError(http2ErrCodeProtocol) } func (cc *http2ClientConn) writeStreamReset(streamID uint32, code http2ErrCode, err error) { cc.wmu.Lock() cc.fr.WriteRSTStream(streamID, code) cc.bw.Flush() cc.wmu.Unlock() } var ( http2errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") http2errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers") ) func (cc *http2ClientConn) logf(format string, args ...interface{}) { cc.t.logf(format, args...) } func (cc *http2ClientConn) vlogf(format string, args ...interface{}) { cc.t.vlogf(format, args...) } func (t *http2Transport) vlogf(format string, args ...interface{}) { if http2VerboseLogs { t.logf(format, args...) } } func (t *http2Transport) logf(format string, args ...interface{}) { log.Printf(format, args...) 
} var http2noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil)) func http2strSliceContains(ss []string, s string) bool { for _, v := range ss { if v == s { return true } } return false } type http2erringRoundTripper struct{ err error } func (rt http2erringRoundTripper) RoundTrip(*Request) (*Response, error) { return nil, rt.err } // gzipReader wraps a response body so it can lazily // call gzip.NewReader on the first call to Read type http2gzipReader struct { body io.ReadCloser // underlying Response.Body zr *gzip.Reader // lazily-initialized gzip reader zerr error // sticky error } func (gz *http2gzipReader) Read(p []byte) (n int, err error) { if gz.zerr != nil { return 0, gz.zerr } if gz.zr == nil { gz.zr, err = gzip.NewReader(gz.body) if err != nil { gz.zerr = err return 0, err } } return gz.zr.Read(p) } func (gz *http2gzipReader) Close() error { return gz.body.Close() } type http2errorReader struct{ err error } func (r http2errorReader) Read(p []byte) (int, error) { return 0, r.err } // bodyWriterState encapsulates various state around the Transport's writing // of the request body, particularly regarding doing delayed writes of the body // when the request contains "Expect: 100-continue". type http2bodyWriterState struct { cs *http2clientStream timer *time.Timer // if non-nil, we're doing a delayed write fnonce *sync.Once // to call fn with fn func() // the code to run in the goroutine, writing the body resc chan error // result of fn's execution delay time.Duration // how long we should delay a delayed write for } func (t *http2Transport) getBodyWriterState(cs *http2clientStream, body io.Reader) (s http2bodyWriterState) { s.cs = cs if body == nil { return } resc := make(chan error, 1) s.resc = resc s.fn = func() { resc <- cs.writeRequestBody(body, cs.req.Body) } s.delay = t.expectContinueTimeout() if s.delay == 0 || !httplex.HeaderValuesContainsToken( cs.req.Header["Expect"], "100-continue") { return } s.fnonce = new(sync.Once) // Arm the timer with a very large duration, which we'll // intentionally lower later. It has to be large now because // we need a handle to it before writing the headers, but the // s.delay value is defined to not start until after the // request headers were written. const hugeDuration = 365 * 24 * time.Hour s.timer = time.AfterFunc(hugeDuration, func() { s.fnonce.Do(s.fn) }) return } func (s http2bodyWriterState) cancel() { if s.timer != nil { s.timer.Stop() } } func (s http2bodyWriterState) on100() { if s.timer == nil { return } s.timer.Stop() go func() { s.fnonce.Do(s.fn) }() } // scheduleBodyWrite starts writing the body, either immediately (in // the common case) or after the delay timeout. It should not be // called until after the headers have been written. func (s http2bodyWriterState) scheduleBodyWrite() { if s.timer == nil { go s.fn() return } http2traceWait100Continue(s.cs.trace) if s.timer.Stop() { s.timer.Reset(s.delay) } } // isConnectionCloseRequest reports whether req should use its own // connection for a single request and then close the connection. func http2isConnectionCloseRequest(req *Request) bool { return req.Close || httplex.HeaderValuesContainsToken(req.Header["Connection"], "close") } // writeFramer is implemented by any type that is used to write frames. type http2writeFramer interface { writeFrame(http2writeContext) error } // writeContext is the interface needed by the various frame writer // types below. 
All the writeFrame methods below are scheduled via the // frame writing scheduler (see writeScheduler in writesched.go). // // This interface is implemented by *serverConn. // // TODO: decide whether to a) use this in the client code (which didn't // end up using this yet, because it has a simpler design, not // currently implementing priorities), or b) delete this and // make the server code a bit more concrete. type http2writeContext interface { Framer() *http2Framer Flush() error CloseConn() error // HeaderEncoder returns an HPACK encoder that writes to the // returned buffer. HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) } // endsStream reports whether the given frame writer w will locally // close the stream. func http2endsStream(w http2writeFramer) bool { switch v := w.(type) { case *http2writeData: return v.endStream case *http2writeResHeaders: return v.endStream case nil: panic("endsStream called on nil writeFramer") } return false } type http2flushFrameWriter struct{} func (http2flushFrameWriter) writeFrame(ctx http2writeContext) error { return ctx.Flush() } type http2writeSettings []http2Setting func (s http2writeSettings) writeFrame(ctx http2writeContext) error { return ctx.Framer().WriteSettings([]http2Setting(s)...) } type http2writeGoAway struct { maxStreamID uint32 code http2ErrCode } func (p *http2writeGoAway) writeFrame(ctx http2writeContext) error { err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil) if p.code != 0 { ctx.Flush() time.Sleep(50 * time.Millisecond) ctx.CloseConn() } return err } type http2writeData struct { streamID uint32 p []byte endStream bool } func (w *http2writeData) String() string { return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream) } func (w *http2writeData) writeFrame(ctx http2writeContext) error { return ctx.Framer().WriteData(w.streamID, w.endStream, w.p) } // handlerPanicRST is the message sent from handler goroutines when // the handler panics. type http2handlerPanicRST struct { StreamID uint32 } func (hp http2handlerPanicRST) writeFrame(ctx http2writeContext) error { return ctx.Framer().WriteRSTStream(hp.StreamID, http2ErrCodeInternal) } func (se http2StreamError) writeFrame(ctx http2writeContext) error { return ctx.Framer().WriteRSTStream(se.StreamID, se.Code) } type http2writePingAck struct{ pf *http2PingFrame } func (w http2writePingAck) writeFrame(ctx http2writeContext) error { return ctx.Framer().WritePing(true, w.pf.Data) } type http2writeSettingsAck struct{} func (http2writeSettingsAck) writeFrame(ctx http2writeContext) error { return ctx.Framer().WriteSettingsAck() } // writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames // for HTTP response headers or trailers from a server handler. type http2writeResHeaders struct { streamID uint32 httpResCode int // 0 means no ":status" line h Header // may be nil trailers []string // if non-nil, which keys of h to write. nil means all. 
endStream bool date string contentType string contentLength string } func http2encKV(enc *hpack.Encoder, k, v string) { if http2VerboseLogs { log.Printf("http2: server encoding header %q = %q", k, v) } enc.WriteField(hpack.HeaderField{Name: k, Value: v}) } func (w *http2writeResHeaders) writeFrame(ctx http2writeContext) error { enc, buf := ctx.HeaderEncoder() buf.Reset() if w.httpResCode != 0 { http2encKV(enc, ":status", http2httpCodeString(w.httpResCode)) } http2encodeHeaders(enc, w.h, w.trailers) if w.contentType != "" { http2encKV(enc, "content-type", w.contentType) } if w.contentLength != "" { http2encKV(enc, "content-length", w.contentLength) } if w.date != "" { http2encKV(enc, "date", w.date) } headerBlock := buf.Bytes() if len(headerBlock) == 0 && w.trailers == nil { panic("unexpected empty hpack") } // For now we're lazy and just pick the minimum MAX_FRAME_SIZE // that all peers must support (16KB). Later we could care // more and send larger frames if the peer advertised it, but // there's little point. Most headers are small anyway (so we // generally won't have CONTINUATION frames), and extra frames // only waste 9 bytes anyway. const maxFrameSize = 16384 first := true for len(headerBlock) > 0 { frag := headerBlock if len(frag) > maxFrameSize { frag = frag[:maxFrameSize] } headerBlock = headerBlock[len(frag):] endHeaders := len(headerBlock) == 0 var err error if first { first = false err = ctx.Framer().WriteHeaders(http2HeadersFrameParam{ StreamID: w.streamID, BlockFragment: frag, EndStream: w.endStream, EndHeaders: endHeaders, }) } else { err = ctx.Framer().WriteContinuation(w.streamID, endHeaders, frag) } if err != nil { return err } } return nil } type http2write100ContinueHeadersFrame struct { streamID uint32 } func (w http2write100ContinueHeadersFrame) writeFrame(ctx http2writeContext) error { enc, buf := ctx.HeaderEncoder() buf.Reset() http2encKV(enc, ":status", "100") return ctx.Framer().WriteHeaders(http2HeadersFrameParam{ StreamID: w.streamID, BlockFragment: buf.Bytes(), EndStream: false, EndHeaders: true, }) } type http2writeWindowUpdate struct { streamID uint32 // or 0 for conn-level n uint32 } func (wu http2writeWindowUpdate) writeFrame(ctx http2writeContext) error { return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n) } func http2encodeHeaders(enc *hpack.Encoder, h Header, keys []string) { if keys == nil { sorter := http2sorterPool.Get().(*http2sorter) defer http2sorterPool.Put(sorter) keys = sorter.Keys(h) } for _, k := range keys { vv := h[k] k = http2lowerHeader(k) if !http2validWireHeaderFieldName(k) { continue } isTE := k == "transfer-encoding" for _, v := range vv { if !httplex.ValidHeaderFieldValue(v) { continue } if isTE && v != "trailers" { continue } http2encKV(enc, k, v) } } } // frameWriteMsg is a request to write a frame. type http2frameWriteMsg struct { // write is the interface value that does the writing, once the // writeScheduler (below) has decided to select this frame // to write. The write functions are all defined in write.go. write http2writeFramer stream *http2stream // used for prioritization. nil for non-stream frames. // done, if non-nil, must be a buffered channel with space for // 1 message and is sent the return value from write (or an // earlier error) when the frame has been written. 
done chan error } // for debugging only: func (wm http2frameWriteMsg) String() string { var streamID uint32 if wm.stream != nil { streamID = wm.stream.id } var des string if s, ok := wm.write.(fmt.Stringer); ok { des = s.String() } else { des = fmt.Sprintf("%T", wm.write) } return fmt.Sprintf("[frameWriteMsg stream=%d, ch=%v, type: %v]", streamID, wm.done != nil, des) } // writeScheduler tracks pending frames to write, priorities, and decides // the next one to use. It is not thread-safe. type http2writeScheduler struct { // zero are frames not associated with a specific stream. // They're sent before any stream-specific freams. zero http2writeQueue // maxFrameSize is the maximum size of a DATA frame // we'll write. Must be non-zero and between 16K-16M. maxFrameSize uint32 // sq contains the stream-specific queues, keyed by stream ID. // when a stream is idle, it's deleted from the map. sq map[uint32]*http2writeQueue // canSend is a slice of memory that's reused between frame // scheduling decisions to hold the list of writeQueues (from sq) // which have enough flow control data to send. After canSend is // built, the best is selected. canSend []*http2writeQueue // pool of empty queues for reuse. queuePool []*http2writeQueue } func (ws *http2writeScheduler) putEmptyQueue(q *http2writeQueue) { if len(q.s) != 0 { panic("queue must be empty") } ws.queuePool = append(ws.queuePool, q) } func (ws *http2writeScheduler) getEmptyQueue() *http2writeQueue { ln := len(ws.queuePool) if ln == 0 { return new(http2writeQueue) } q := ws.queuePool[ln-1] ws.queuePool = ws.queuePool[:ln-1] return q } func (ws *http2writeScheduler) empty() bool { return ws.zero.empty() && len(ws.sq) == 0 } func (ws *http2writeScheduler) add(wm http2frameWriteMsg) { st := wm.stream if st == nil { ws.zero.push(wm) } else { ws.streamQueue(st.id).push(wm) } } func (ws *http2writeScheduler) streamQueue(streamID uint32) *http2writeQueue { if q, ok := ws.sq[streamID]; ok { return q } if ws.sq == nil { ws.sq = make(map[uint32]*http2writeQueue) } q := ws.getEmptyQueue() ws.sq[streamID] = q return q } // take returns the most important frame to write and removes it from the scheduler. // It is illegal to call this if the scheduler is empty or if there are no connection-level // flow control bytes available. func (ws *http2writeScheduler) take() (wm http2frameWriteMsg, ok bool) { if ws.maxFrameSize == 0 { panic("internal error: ws.maxFrameSize not initialized or invalid") } if !ws.zero.empty() { return ws.zero.shift(), true } if len(ws.sq) == 0 { return } for id, q := range ws.sq { if q.firstIsNoCost() { return ws.takeFrom(id, q) } } if len(ws.canSend) != 0 { panic("should be empty") } for _, q := range ws.sq { if n := ws.streamWritableBytes(q); n > 0 { ws.canSend = append(ws.canSend, q) } } if len(ws.canSend) == 0 { return } defer ws.zeroCanSend() q := ws.canSend[0] return ws.takeFrom(q.streamID(), q) } // zeroCanSend is defered from take. func (ws *http2writeScheduler) zeroCanSend() { for i := range ws.canSend { ws.canSend[i] = nil } ws.canSend = ws.canSend[:0] } // streamWritableBytes returns the number of DATA bytes we could write // from the given queue's stream, if this stream/queue were // selected. It is an error to call this if q's head isn't a // *writeData. 
func (ws *http2writeScheduler) streamWritableBytes(q *http2writeQueue) int32 { wm := q.head() ret := wm.stream.flow.available() if ret == 0 { return 0 } if int32(ws.maxFrameSize) < ret { ret = int32(ws.maxFrameSize) } if ret == 0 { panic("internal error: ws.maxFrameSize not initialized or invalid") } wd := wm.write.(*http2writeData) if len(wd.p) < int(ret) { ret = int32(len(wd.p)) } return ret } func (ws *http2writeScheduler) takeFrom(id uint32, q *http2writeQueue) (wm http2frameWriteMsg, ok bool) { wm = q.head() if wd, ok := wm.write.(*http2writeData); ok && len(wd.p) > 0 { allowed := wm.stream.flow.available() if allowed == 0 { return http2frameWriteMsg{}, false } if int32(ws.maxFrameSize) < allowed { allowed = int32(ws.maxFrameSize) } if len(wd.p) > int(allowed) { wm.stream.flow.take(allowed) chunk := wd.p[:allowed] wd.p = wd.p[allowed:] return http2frameWriteMsg{ stream: wm.stream, write: &http2writeData{ streamID: wd.streamID, p: chunk, endStream: false, }, done: nil, }, true } wm.stream.flow.take(int32(len(wd.p))) } q.shift() if q.empty() { ws.putEmptyQueue(q) delete(ws.sq, id) } return wm, true } func (ws *http2writeScheduler) forgetStream(id uint32) { q, ok := ws.sq[id] if !ok { return } delete(ws.sq, id) for i := range q.s { q.s[i] = http2frameWriteMsg{} } q.s = q.s[:0] ws.putEmptyQueue(q) } type http2writeQueue struct { s []http2frameWriteMsg } // streamID returns the stream ID for a non-empty stream-specific queue. func (q *http2writeQueue) streamID() uint32 { return q.s[0].stream.id } func (q *http2writeQueue) empty() bool { return len(q.s) == 0 } func (q *http2writeQueue) push(wm http2frameWriteMsg) { q.s = append(q.s, wm) } // head returns the next item that would be removed by shift. func (q *http2writeQueue) head() http2frameWriteMsg { if len(q.s) == 0 { panic("invalid use of queue") } return q.s[0] } func (q *http2writeQueue) shift() http2frameWriteMsg { if len(q.s) == 0 { panic("invalid use of queue") } wm := q.s[0] copy(q.s, q.s[1:]) q.s[len(q.s)-1] = http2frameWriteMsg{} q.s = q.s[:len(q.s)-1] return wm } func (q *http2writeQueue) firstIsNoCost() bool { if df, ok := q.s[0].write.(*http2writeData); ok { return len(df.p) == 0 } return true }
[ "\"DEBUG_HTTP2_GOROUTINES\"", "\"GODEBUG\"" ]
[]
[ "GODEBUG", "DEBUG_HTTP2_GOROUTINES" ]
[]
["GODEBUG", "DEBUG_HTTP2_GOROUTINES"]
go
2
0
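The row above (the bundled net/http HTTP/2 transport code) lists GODEBUG and DEBUG_HTTP2_GOROUTINES as its referenced environment variables, although the excerpted content ends before the code that reads them. The sketch below is a hedged illustration only, not the transport's actual initialization: the variable names verboseLogs and debugGoroutines, and the exact GODEBUG token checked, are assumptions for this example.

package main

import (
	"fmt"
	"os"
	"strings"
)

// Hypothetical debug flags derived from the environment variables listed
// in the dataset row above (GODEBUG and DEBUG_HTTP2_GOROUTINES).
var (
	verboseLogs     bool // assumed analogue of the transport's verbose-log switch
	debugGoroutines bool
)

func init() {
	// GODEBUG is a comma-separated list of key=value pairs; http2debug=1
	// (or =2) is the documented way to turn on HTTP/2 transport logging.
	for _, kv := range strings.Split(os.Getenv("GODEBUG"), ",") {
		if kv == "http2debug=1" || kv == "http2debug=2" {
			verboseLogs = true
		}
	}
	// DEBUG_HTTP2_GOROUTINES is treated here as a simple on/off switch.
	debugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
}

func main() {
	fmt.Println("verboseLogs:", verboseLogs, "debugGoroutines:", debugGoroutines)
}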
packet/models.go
package packet

import (
	"encoding/json"
	"net"
	"os"
	"time"

	"github.com/pkg/errors"
	"github.com/tinkerbell/boots/files/ignition"
)

// models.go contains the Hardware structures matching the data models defined by tink and cacher

// BondingMode is the hardware bonding mode
type BondingMode int

// Discovery interface is the base for cacher and tinkerbell hardware discovery
type Discovery interface {
	Instance() *Instance
	MAC() net.HardwareAddr
	Mode() string
	GetIP(addr net.HardwareAddr) IP
	GetMAC(ip net.IP) net.HardwareAddr
	DnsServers() []net.IP
	LeaseTime(mac net.HardwareAddr) time.Duration
	Hostname() (string, error)
	Hardware() *Hardware
	SetMAC(mac net.HardwareAddr)
}

// DiscoveryCacher presents the structure for old data model
type DiscoveryCacher struct {
	*HardwareCacher
	mac net.HardwareAddr
}

// DiscoveryTinkerbellV1 presents the structure for tinkerbell's new data model, version 1
type DiscoveryTinkerbellV1 struct {
	*HardwareTinkerbellV1
	mac net.HardwareAddr
}

// Interface is the base for cacher and tinkerbell hardware (network) interface
type Interface interface {
}

type InterfaceCacher struct {
	*Port
}

type InterfaceTinkerbell struct {
	*NetworkInterface
}

// Hardware interface holds primary hardware methods
type Hardware interface {
	HardwareAllowPXE(mac net.HardwareAddr) bool
	HardwareAllowWorkflow(mac net.HardwareAddr) bool
	HardwareArch(mac net.HardwareAddr) string
	HardwareBondingMode() BondingMode
	HardwareFacilityCode() string
	HardwareID() string
	HardwareIPs() []IP
	Interfaces() []Port // TODO: to be updated
	HardwareManufacturer() string
	HardwarePlanSlug() string
	HardwarePlanVersionSlug() string
	HardwareState() HardwareState
	HardwareServicesVersion() string
	HardwareUEFI(mac net.HardwareAddr) bool
	OsieBaseURL(mac net.HardwareAddr) string
	KernelPath(mac net.HardwareAddr) string
	InitrdPath(mac net.HardwareAddr) string
}

// HardwareCacher represents the old hardware data model for backward compatibility
type HardwareCacher struct {
	ID    string        `json:"id"`
	Name  string        `json:"name"`
	State HardwareState `json:"state"`

	BondingMode     BondingMode     `json:"bonding_mode"`
	NetworkPorts    []Port          `json:"network_ports"`
	Manufacturer    Manufacturer    `json:"manufacturer"`
	PlanSlug        string          `json:"plan_slug"`
	PlanVersionSlug string          `json:"plan_version_slug"`
	Arch            string          `json:"arch"`
	FacilityCode    string          `json:"facility_code"`
	IPMI            IP              `json:"management"`
	IPs             []IP            `json:"ip_addresses"`
	PreinstallOS    OperatingSystem `json:"preinstalled_operating_system_version"`
	PrivateSubnets  []string        `json:"private_subnets,omitempty"`
	UEFI            bool            `json:"efi_boot"`
	AllowPXE        bool            `json:"allow_pxe"`
	AllowWorkflow   bool            `json:"allow_workflow"`
	ServicesVersion ServicesVersion `json:"services"`
	Instance        *Instance       `json:"instance"`
}

// HardwareTinkerbellV1 represents the new hardware data model for tinkerbell, version 1
type HardwareTinkerbellV1 struct {
	ID       string   `json:"id"`
	Network  Network  `json:"network"`
	Metadata Metadata `json:"metadata"`
}

// NewDiscovery instantiates a Discovery struct from the json argument
func NewDiscovery(b []byte) (*Discovery, error) {
	var res Discovery

	if string(b) == "" || string(b) == "{}" {
		return nil, errors.New("empty response from db")
	}

	dataModelVersion := os.Getenv("DATA_MODEL_VERSION")
	switch dataModelVersion {
	case "1":
		res = &DiscoveryTinkerbellV1{}
	default:
		res = &DiscoveryCacher{}
	}

	err := json.Unmarshal(b, &res)
	if err != nil {
		return nil, errors.Wrap(err, "unmarshal json for discovery")
	}

	return &res, err
}

// Instance models the instance data as returned by the API
type Instance struct {
	ID       string        `json:"id"`
	State    InstanceState `json:"state"`
	Hostname string        `json:"hostname"`
	AllowPXE bool          `json:"allow_pxe"`
	Rescue   bool          `json:"rescue"`

	OS            OperatingSystem `json:"operating_system_version"`
	AlwaysPXE     bool            `json:"always_pxe,omitempty"`
	IPXEScriptURL string          `json:"ipxe_script_url,omitempty"`
	IPs           []IP            `json:"ip_addresses"`
	UserData      string          `json:"userdata,omitempty"`

	// Only returned in the first 24 hours
	CryptedRootPassword string `json:"crypted_root_password,omitempty"`

	Tags []string `json:"tags,omitempty"`
	// Project
	Storage ignition.Storage `json:"storage,omitempty"`
	SSHKeys []string         `json:"ssh_keys,omitempty"`
	// CustomData
	NetworkReady bool `json:"network_ready,omitempty"`
}

// Device Full device result from /devices endpoint
type Device struct {
	ID string `json:"id"`
}

// FindIP returns IP for an instance, nil otherwise
func (i *Instance) FindIP(pred func(IP) bool) *IP {
	for _, ip := range i.IPs {
		if pred(ip) {
			return &ip
		}
	}
	return nil
}

func managementPublicIPv4IP(ip IP) bool {
	return ip.Public && ip.Management && ip.Family == 4
}

func managementPrivateIPv4IP(ip IP) bool {
	return !ip.Public && ip.Management && ip.Family == 4
}

// InstanceState represents the state of an instance (e.g. active)
type InstanceState string

type Event struct {
	Type    string `json:"type"`
	Body    string `json:"body,omitempty"`
	Private bool   `json:"private"`
}

type UserEvent struct {
	Code    string `json:"code"`
	State   string `json:"state"`
	Message string `json:"message"`
}

type ServicesVersion struct {
	Osie string `json:"osie"`
}

// HardwareState is the hardware state (e.g. provisioning)
type HardwareState string

// IP represents IP address for a hardware
type IP struct {
	Address    net.IP `json:"address"`
	Netmask    net.IP `json:"netmask"`
	Gateway    net.IP `json:"gateway"`
	Family     int    `json:"address_family"`
	Public     bool   `json:"public"`
	Management bool   `json:"management"`
}

// type NetworkPorts struct {
// 	Main []Port `json:"main"`
// 	IPMI Port   `json:"ipmi"`
// }

// unused, but keeping for now
// func (p *NetworkPorts) addMain(port Port) {
// 	var (
// 		mac   = port.MAC()
// 		ports = p.Main
// 	)
// 	n := len(ports)
// 	i := sort.Search(n, func(i int) bool {
// 		return bytes.Compare(mac, ports[i].MAC()) < 0
// 	})
// 	if i < n {
// 		ports = append(append(ports[:i], port), ports[i:]...)
// 	} else {
// 		ports = append(ports, port)
// 	}
// 	p.Main = ports
// }

// OperatingSystem holds details for the operating system
type OperatingSystem struct {
	Slug     string `json:"slug"`
	Distro   string `json:"distro"`
	Version  string `json:"version"`
	ImageTag string `json:"image_tag"`
	OsSlug   string `json:"os_slug"`
}

// Port represents a network port
type Port struct {
	ID   string   `json:"id"`
	Type PortType `json:"type"`
	Name string   `json:"name"`
	Data struct {
		MAC  *MACAddr `json:"mac"`
		Bond string   `json:"bond"`
	} `json:"data"`
}

// MAC returns the physical hardware address, nil otherwise
func (p *Port) MAC() net.HardwareAddr {
	if p.Data.MAC != nil && *p.Data.MAC != ZeroMAC {
		return p.Data.MAC.HardwareAddr()
	}
	return nil
}

// PortType is type for a network port
type PortType string

// Manufacturer holds data for hardware manufacturer
type Manufacturer struct {
	ID   string `json:"id"`
	Slug string `json:"slug"`
}

type NetworkInterface struct {
	DHCP    DHCP    `json:"dhcp,omitempty"`
	Netboot Netboot `json:"netboot,omitempty"`
}

// DHCP holds details for DHCP connection
type DHCP struct {
	MAC         *MACAddr      `json:"mac"`
	IP          IP            `json:"ip"`
	Hostname    string        `json:"hostname"`
	LeaseTime   time.Duration `json:"lease_time"`
	NameServers []string      `json:"name_servers"`
	TimeServers []string      `json:"time_servers"`
	Arch        string        `json:"arch"`
	UEFI        bool          `json:"uefi"`
	IfaceName   string        `json:"iface_name"` // to be removed?
}

// Netboot holds details for a hardware to boot over network
type Netboot struct {
	AllowPXE      bool `json:"allow_pxe"`      // to be removed?
	AllowWorkflow bool `json:"allow_workflow"` // to be removed?

	IPXE struct {
		URL      string `json:"url"`
		Contents string `json:"contents"`
	} `json:"ipxe"`

	Osie Osie `json:"osie"`
}

// Bootstrapper is the bootstrapper to be used during netboot
type Osie struct {
	BaseURL string `json:"base_url"`
	Kernel  string `json:"kernel"`
	Initrd  string `json:"initrd"`
}

// Network holds hardware network details
type Network struct {
	Interfaces []NetworkInterface `json:"interfaces,omitempty"`
	//Default NetworkInterface `json:"default,omitempty"`
}

// Metadata holds the hardware metadata
type Metadata struct {
	State        HardwareState `json:"state"`
	BondingMode  BondingMode   `json:"bonding_mode"`
	Manufacturer Manufacturer  `json:"manufacturer"`
	Instance     *Instance     `json:"instance"`
	Custom       struct {
		PreinstalledOS OperatingSystem `json:"preinstalled_operating_system_version"`
		PrivateSubnets []string        `json:"private_subnets"`
	} `json:"custom"`
	Facility Facility `json:"facility"`
}

// Facility represents the facilty in use
type Facility struct {
	PlanSlug        string `json:"plan_slug"`
	PlanVersionSlug string `json:"plan_version_slug"`
	FacilityCode    string `json:"facility_code"`
}
[ "\"DATA_MODEL_VERSION\"" ]
[]
[ "DATA_MODEL_VERSION" ]
[]
["DATA_MODEL_VERSION"]
go
1
0
rinex_webservice/asgi.py
""" ASGI config for rinex_webservice project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'rinex_webservice.settings') application = get_asgi_application()
[]
[]
[]
[]
[]
python
0
0
test/e2e/run_test.go
package integration import ( "fmt" "io/ioutil" "net" "os" "os/exec" "path/filepath" "strconv" "strings" "syscall" "time" "github.com/containers/common/pkg/cgroups" "github.com/containers/podman/v4/pkg/rootless" . "github.com/containers/podman/v4/test/utils" "github.com/containers/storage/pkg/stringid" "github.com/mrunalp/fileutils" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" . "github.com/onsi/gomega/gexec" ) var _ = Describe("Podman run", func() { var ( tempdir string err error podmanTest *PodmanTestIntegration ) BeforeEach(func() { tempdir, err = CreateTempDirInTempDir() if err != nil { os.Exit(1) } podmanTest = PodmanTestCreate(tempdir) podmanTest.Setup() podmanTest.SeedImages() }) AfterEach(func() { podmanTest.Cleanup() f := CurrentGinkgoTestDescription() processTestResult(f) }) It("podman run a container based on local image", func() { session := podmanTest.Podman([]string{"run", ALPINE, "ls"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) }) It("podman run check /run/.containerenv", func() { session := podmanTest.Podman([]string{"run", ALPINE, "cat", "/run/.containerenv"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(Equal("")) session = podmanTest.Podman([]string{"run", "--privileged", "--name=test1", ALPINE, "cat", "/run/.containerenv"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("name=\"test1\"")) Expect(session.OutputToString()).To(ContainSubstring("image=\"" + ALPINE + "\"")) session = podmanTest.Podman([]string{"run", "-v", "/:/host", ALPINE, "cat", "/run/.containerenv"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("graphRootMounted=1")) session = podmanTest.Podman([]string{"run", "-v", "/:/host", "--privileged", ALPINE, "cat", "/run/.containerenv"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("graphRootMounted=1")) }) It("podman run a container based on a complex local image name", func() { imageName := strings.TrimPrefix(nginx, "quay.io/") session := podmanTest.Podman([]string{"run", imageName, "ls"}) session.WaitWithDefaultTimeout() Expect(session.ErrorToString()).ToNot(ContainSubstring("Trying to pull")) Expect(session).Should(Exit(0)) }) It("podman run --signature-policy", func() { session := podmanTest.Podman([]string{"run", "--pull=always", "--signature-policy", "/no/such/file", ALPINE}) session.WaitWithDefaultTimeout() Expect(session).To(ExitWithError()) session = podmanTest.Podman([]string{"run", "--pull=always", "--signature-policy", "/etc/containers/policy.json", ALPINE}) session.WaitWithDefaultTimeout() if IsRemote() { Expect(session).To(ExitWithError()) Expect(session.ErrorToString()).To(ContainSubstring("unknown flag")) } else { Expect(session).Should(Exit(0)) } }) It("podman run --rm with --restart", func() { session := podmanTest.Podman([]string{"run", "--rm", "--restart", "", ALPINE}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) session = podmanTest.Podman([]string{"run", "--rm", "--restart", "no", ALPINE}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) session = podmanTest.Podman([]string{"run", "--rm", "--restart", "on-failure", ALPINE}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) session = podmanTest.Podman([]string{"run", "--rm", "--restart", "always", ALPINE}) session.WaitWithDefaultTimeout() 
Expect(session).To(ExitWithError()) session = podmanTest.Podman([]string{"run", "--rm", "--restart", "unless-stopped", ALPINE}) session.WaitWithDefaultTimeout() Expect(session).To(ExitWithError()) }) It("podman run a container based on on a short name with localhost", func() { tag := podmanTest.Podman([]string{"tag", nginx, "localhost/libpod/alpine_nginx:latest"}) tag.WaitWithDefaultTimeout() rmi := podmanTest.Podman([]string{"rmi", nginx}) rmi.WaitWithDefaultTimeout() session := podmanTest.Podman([]string{"run", "libpod/alpine_nginx:latest", "ls"}) session.WaitWithDefaultTimeout() Expect(session.ErrorToString()).ToNot(ContainSubstring("Trying to pull")) Expect(session).Should(Exit(0)) }) It("podman container run a container based on on a short name with localhost", func() { tag := podmanTest.Podman([]string{"image", "tag", nginx, "localhost/libpod/alpine_nginx:latest"}) tag.WaitWithDefaultTimeout() rmi := podmanTest.Podman([]string{"image", "rm", nginx}) rmi.WaitWithDefaultTimeout() session := podmanTest.Podman([]string{"container", "run", "libpod/alpine_nginx:latest", "ls"}) session.WaitWithDefaultTimeout() Expect(session.ErrorToString()).ToNot(ContainSubstring("Trying to pull")) Expect(session).Should(Exit(0)) }) It("podman run a container based on local image with short options", func() { session := podmanTest.Podman([]string{"run", "-dt", ALPINE, "ls"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) }) It("podman run a container based on local image with short options and args", func() { // regression test for #714 session := podmanTest.Podman([]string{"run", ALPINE, "find", "/etc", "-name", "hosts"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("/etc/hosts")) }) It("podman create pod with name in /etc/hosts", func() { name := "test_container" hostname := "test_hostname" session := podmanTest.Podman([]string{"run", "-ti", "--rm", "--name", name, "--hostname", hostname, ALPINE, "cat", "/etc/hosts"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring(name)) Expect(session.OutputToString()).To(ContainSubstring(hostname)) }) It("podman run a container based on remote image", func() { // Changing session to rsession rsession := podmanTest.Podman([]string{"run", "-dt", ALPINE, "ls"}) rsession.WaitWithDefaultTimeout() Expect(rsession).Should(Exit(0)) lock := GetPortLock("5000") defer lock.Unlock() session := podmanTest.Podman([]string{"run", "-d", "--name", "registry", "-p", "5000:5000", registry, "/entrypoint.sh", "/etc/docker/registry/config.yml"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) if !WaitContainerReady(podmanTest, "registry", "listening on", 20, 1) { Skip("Cannot start docker registry.") } run := podmanTest.Podman([]string{"run", "--tls-verify=false", ALPINE}) run.WaitWithDefaultTimeout() Expect(run).Should(Exit(0)) Expect(podmanTest.NumberOfContainers()).To(Equal(3)) // Now registries.conf will be consulted where localhost:5000 // is set to be insecure. 
run = podmanTest.Podman([]string{"run", ALPINE}) run.WaitWithDefaultTimeout() Expect(run).Should(Exit(0)) }) It("podman run a container with a --rootfs", func() { rootfs := filepath.Join(tempdir, "rootfs") uls := filepath.Join("/", "usr", "local", "share") uniqueString := stringid.GenerateNonCryptoID() testFilePath := filepath.Join(uls, uniqueString) tarball := filepath.Join(tempdir, "rootfs.tar") err := os.Mkdir(rootfs, 0770) Expect(err).Should(BeNil()) // Change image in predictable way to validate export csession := podmanTest.Podman([]string{"run", "--name", uniqueString, ALPINE, "/bin/sh", "-c", fmt.Sprintf("echo %s > %s", uniqueString, testFilePath)}) csession.WaitWithDefaultTimeout() Expect(csession).Should(Exit(0)) // Export from working container image guarantees working root esession := podmanTest.Podman([]string{"export", "--output", tarball, uniqueString}) esession.WaitWithDefaultTimeout() Expect(esession).Should(Exit(0)) Expect(tarball).Should(BeARegularFile()) // N/B: This will loose any extended attributes like SELinux types fmt.Fprintf(os.Stderr, "Extracting container root tarball\n") tarsession := SystemExec("tar", []string{"xf", tarball, "-C", rootfs}) Expect(tarsession).Should(Exit(0)) Expect(filepath.Join(rootfs, uls)).Should(BeADirectory()) // Other tests confirm SELinux types, just confirm --rootfs is working. session := podmanTest.Podman([]string{"run", "-i", "--security-opt", "label=disable", "--rootfs", rootfs, "cat", testFilePath}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) // Validate changes made in original container and export stdoutLines := session.OutputToStringArray() Expect(stdoutLines).Should(HaveLen(1)) Expect(stdoutLines[0]).Should(Equal(uniqueString)) SkipIfRemote("External overlay only work locally") if os.Getenv("container") != "" { Skip("Overlay mounts not supported when running in a container") } if rootless.IsRootless() { if _, err := exec.LookPath("fuse-overlayfs"); err != nil { Skip("Fuse-Overlayfs required for rootless overlay mount test") } } // Test --rootfs with an external overlay // use --rm to remove container and confirm if we did not leak anything osession := podmanTest.Podman([]string{"run", "-i", "--rm", "--security-opt", "label=disable", "--rootfs", rootfs + ":O", "cat", testFilePath}) osession.WaitWithDefaultTimeout() Expect(osession).Should(Exit(0)) // Test podman start stop with overlay osession = podmanTest.Podman([]string{"run", "--name", "overlay-foo", "--security-opt", "label=disable", "--rootfs", rootfs + ":O", "echo", "hello"}) osession.WaitWithDefaultTimeout() Expect(osession).Should(Exit(0)) osession = podmanTest.Podman([]string{"stop", "overlay-foo"}) osession.WaitWithDefaultTimeout() Expect(osession).Should(Exit(0)) startsession := podmanTest.Podman([]string{"start", "--attach", "overlay-foo"}) startsession.WaitWithDefaultTimeout() Expect(startsession).Should(Exit(0)) Expect(startsession.OutputToString()).To(Equal("hello")) // remove container for above test overlay-foo osession = podmanTest.Podman([]string{"rm", "overlay-foo"}) osession.WaitWithDefaultTimeout() Expect(osession).Should(Exit(0)) // Test --rootfs with an external overlay with --uidmap osession = podmanTest.Podman([]string{"run", "--uidmap", "0:1000:1000", "--rm", "--security-opt", "label=disable", "--rootfs", rootfs + ":O", "echo", "hello"}) osession.WaitWithDefaultTimeout() Expect(osession).Should(Exit(0)) Expect(osession.OutputToString()).To(Equal("hello")) }) It("podman run a container with --init", func() { session := 
podmanTest.Podman([]string{"run", "--name", "test", "--init", ALPINE, "ls"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) result := podmanTest.Podman([]string{"inspect", "test"}) result.WaitWithDefaultTimeout() Expect(result).Should(Exit(0)) conData := result.InspectContainerToJSON() Expect(conData[0].Path).To(Equal("/dev/init")) Expect(conData[0].Config.Annotations).To(HaveKeyWithValue("io.podman.annotations.init", "TRUE")) }) It("podman run a container with --init and --init-path", func() { session := podmanTest.Podman([]string{"run", "--name", "test", "--init", "--init-path", "/usr/libexec/podman/catatonit", ALPINE, "ls"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) result := podmanTest.Podman([]string{"inspect", "test"}) result.WaitWithDefaultTimeout() Expect(result).Should(Exit(0)) conData := result.InspectContainerToJSON() Expect(conData[0].Path).To(Equal("/dev/init")) Expect(conData[0].Config.Annotations).To(HaveKeyWithValue("io.podman.annotations.init", "TRUE")) }) It("podman run a container without --init", func() { session := podmanTest.Podman([]string{"run", "--name", "test", ALPINE, "ls"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) result := podmanTest.Podman([]string{"inspect", "test"}) result.WaitWithDefaultTimeout() Expect(result).Should(Exit(0)) conData := result.InspectContainerToJSON() Expect(conData[0].Path).To(Equal("ls")) Expect(conData[0].Config.Annotations).To(HaveKeyWithValue("io.podman.annotations.init", "FALSE")) }) forbidGetCWDSeccompProfile := func() string { in := []byte(`{"defaultAction":"SCMP_ACT_ALLOW","syscalls":[{"name":"getcwd","action":"SCMP_ACT_ERRNO"}]}`) jsonFile, err := podmanTest.CreateSeccompJSON(in) if err != nil { fmt.Println(err) Skip("Failed to prepare seccomp.json for test.") } return jsonFile } It("podman run mask and unmask path test", func() { session := podmanTest.Podman([]string{"run", "-d", "--name=maskCtr1", "--security-opt", "unmask=ALL", "--security-opt", "mask=/proc/acpi", ALPINE, "sleep", "200"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) session = podmanTest.Podman([]string{"exec", "maskCtr1", "ls", "/sys/firmware"}) session.WaitWithDefaultTimeout() Expect(session.OutputToString()).To(Not(BeEmpty())) Expect(session).Should(Exit(0)) session = podmanTest.Podman([]string{"exec", "maskCtr1", "ls", "/proc/acpi"}) session.WaitWithDefaultTimeout() Expect(session.OutputToString()).To(BeEmpty()) session = podmanTest.Podman([]string{"run", "-d", "--name=maskCtr2", "--security-opt", "unmask=/proc/acpi:/sys/firmware", ALPINE, "sleep", "200"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) session = podmanTest.Podman([]string{"exec", "maskCtr2", "ls", "/sys/firmware"}) session.WaitWithDefaultTimeout() Expect(session.OutputToString()).To(Not(BeEmpty())) Expect(session).Should(Exit(0)) session = podmanTest.Podman([]string{"exec", "maskCtr2", "ls", "/proc/acpi"}) session.WaitWithDefaultTimeout() Expect(session.OutputToString()).To(Not(BeEmpty())) Expect(session).Should(Exit(0)) session = podmanTest.Podman([]string{"run", "-d", "--name=maskCtr3", "--security-opt", "mask=/sys/power/disk", ALPINE, "sleep", "200"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) session = podmanTest.Podman([]string{"exec", "maskCtr3", "cat", "/sys/power/disk"}) session.WaitWithDefaultTimeout() Expect(session.OutputToString()).To(BeEmpty()) Expect(session).Should(Exit(0)) session = podmanTest.Podman([]string{"run", "-d", "--name=maskCtr4", 
"--security-opt", "systempaths=unconfined", ALPINE, "sleep", "200"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) session = podmanTest.Podman([]string{"exec", "maskCtr4", "ls", "/sys/firmware"}) session.WaitWithDefaultTimeout() Expect(session.OutputToString()).To(Not(BeEmpty())) Expect(session).Should(Exit(0)) session = podmanTest.Podman([]string{"run", "-d", "--name=maskCtr5", "--security-opt", "systempaths=unconfined", ALPINE, "grep", "/proc", "/proc/self/mounts"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToStringArray()).Should(HaveLen(1)) session = podmanTest.Podman([]string{"run", "-d", "--security-opt", "unmask=/proc/*", ALPINE, "grep", "/proc", "/proc/self/mounts"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToStringArray()).Should(HaveLen(1)) session = podmanTest.Podman([]string{"run", "--security-opt", "unmask=/proc/a*", ALPINE, "ls", "/proc/acpi"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(Not(BeEmpty())) }) It("podman run security-opt unmask on /sys/fs/cgroup", func() { SkipIfCgroupV1("podman umask on /sys/fs/cgroup will fail with cgroups V1") SkipIfRootless("/sys/fs/cgroup rw access is needed") rwOnCgroups := "/sys/fs/cgroup cgroup2 rw" session := podmanTest.Podman([]string{"run", "--security-opt", "unmask=ALL", "--security-opt", "mask=/sys/fs/cgroup", ALPINE, "cat", "/proc/mounts"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring(rwOnCgroups)) session = podmanTest.Podman([]string{"run", "--security-opt", "unmask=/sys/fs/cgroup", ALPINE, "cat", "/proc/mounts"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring(rwOnCgroups)) session = podmanTest.Podman([]string{"run", "--security-opt", "unmask=/sys/fs/cgroup///", ALPINE, "cat", "/proc/mounts"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring(rwOnCgroups)) session = podmanTest.Podman([]string{"run", "--security-opt", "unmask=ALL", ALPINE, "cat", "/proc/mounts"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring(rwOnCgroups)) session = podmanTest.Podman([]string{"run", "--security-opt", "unmask=/sys/fs/cgroup", "--security-opt", "mask=/sys/fs/cgroup", ALPINE, "cat", "/proc/mounts"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring(rwOnCgroups)) session = podmanTest.Podman([]string{"run", "--security-opt", "unmask=/sys/fs/cgroup", ALPINE, "ls", "/sys/fs/cgroup"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).ToNot(BeEmpty()) }) It("podman run seccomp test", func() { session := podmanTest.Podman([]string{"run", "-it", "--security-opt", strings.Join([]string{"seccomp=", forbidGetCWDSeccompProfile()}, ""), ALPINE, "pwd"}) session.WaitWithDefaultTimeout() Expect(session).To(ExitWithError()) Expect(session.OutputToString()).To(ContainSubstring("Operation not permitted")) }) It("podman run seccomp test --privileged", func() { session := podmanTest.Podman([]string{"run", "-it", "--privileged", "--security-opt", strings.Join([]string{"seccomp=", forbidGetCWDSeccompProfile()}, ""), ALPINE, "pwd"}) session.WaitWithDefaultTimeout() Expect(session).To(ExitWithError()) 
Expect(session.OutputToString()).To(ContainSubstring("Operation not permitted")) }) It("podman run seccomp test --privileged no profile should be unconfined", func() { session := podmanTest.Podman([]string{"run", "-it", "--privileged", ALPINE, "grep", "Seccomp", "/proc/self/status"}) session.WaitWithDefaultTimeout() Expect(session.OutputToString()).To(ContainSubstring("0")) Expect(session).Should(Exit(0)) }) It("podman run seccomp test no profile should be default", func() { session := podmanTest.Podman([]string{"run", "-it", ALPINE, "grep", "Seccomp", "/proc/self/status"}) session.WaitWithDefaultTimeout() Expect(session.OutputToString()).To(ContainSubstring("2")) Expect(session).Should(Exit(0)) }) It("podman run capabilities test", func() { session := podmanTest.Podman([]string{"run", "--rm", "--cap-add", "all", ALPINE, "cat", "/proc/self/status"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) session = podmanTest.Podman([]string{"run", "--rm", "--cap-add", "sys_admin", ALPINE, "cat", "/proc/self/status"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) session = podmanTest.Podman([]string{"run", "--rm", "--cap-drop", "all", ALPINE, "cat", "/proc/self/status"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) session = podmanTest.Podman([]string{"run", "--rm", "--cap-drop", "setuid", ALPINE, "cat", "/proc/self/status"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) }) It("podman run user capabilities test", func() { // We need to ignore the containers.conf on the test distribution for this test os.Setenv("CONTAINERS_CONF", "/dev/null") if IsRemote() { podmanTest.RestartRemoteService() } session := podmanTest.Podman([]string{"run", "--rm", "--user", "bin", ALPINE, "grep", "CapBnd", "/proc/self/status"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("00000000a80425fb")) session = podmanTest.Podman([]string{"run", "--rm", "--user", "bin", ALPINE, "grep", "CapEff", "/proc/self/status"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("0000000000000000")) session = podmanTest.Podman([]string{"run", "--rm", "--user", "bin", ALPINE, "grep", "CapInh", "/proc/self/status"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("0000000000000000")) session = podmanTest.Podman([]string{"run", "--rm", "--user", "root", ALPINE, "grep", "CapBnd", "/proc/self/status"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("00000000a80425fb")) session = podmanTest.Podman([]string{"run", "--rm", "--user", "root", ALPINE, "grep", "CapEff", "/proc/self/status"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("00000000a80425fb")) session = podmanTest.Podman([]string{"run", "--rm", "--user", "root", ALPINE, "grep", "CapInh", "/proc/self/status"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("00000000a80425fb")) session = podmanTest.Podman([]string{"run", "--rm", ALPINE, "grep", "CapBnd", "/proc/self/status"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("00000000a80425fb")) session = podmanTest.Podman([]string{"run", "--rm", ALPINE, "grep", "CapEff", 
"/proc/self/status"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("00000000a80425fb")) session = podmanTest.Podman([]string{"run", "--user=1000:1000", "--cap-add=DAC_OVERRIDE", "--rm", ALPINE, "grep", "CapAmb", "/proc/self/status"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("0000000000000002")) session = podmanTest.Podman([]string{"run", "--user=1000:1000", "--cap-add=DAC_OVERRIDE", "--rm", ALPINE, "grep", "CapInh", "/proc/self/status"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("0000000000000002")) session = podmanTest.Podman([]string{"run", "--user=0", "--cap-add=DAC_OVERRIDE", "--rm", ALPINE, "grep", "CapAmb", "/proc/self/status"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("0000000000000000")) session = podmanTest.Podman([]string{"run", "--user=0:0", "--cap-add=DAC_OVERRIDE", "--rm", ALPINE, "grep", "CapAmb", "/proc/self/status"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("0000000000000000")) session = podmanTest.Podman([]string{"run", "--user=0:0", "--cap-add=DAC_OVERRIDE", "--rm", ALPINE, "grep", "CapInh", "/proc/self/status"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("00000000a80425fb")) if os.Geteuid() > 0 { if os.Getenv("SKIP_USERNS") != "" { Skip("Skip userns tests.") } if _, err := os.Stat("/proc/self/uid_map"); err != nil { Skip("User namespaces not supported.") } session = podmanTest.Podman([]string{"run", "--userns=keep-id", "--cap-add=DAC_OVERRIDE", "--rm", ALPINE, "grep", "CapAmb", "/proc/self/status"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("0000000000000002")) session = podmanTest.Podman([]string{"run", "--userns=keep-id", "--privileged", "--rm", ALPINE, "grep", "CapInh", "/proc/self/status"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("0000000000000000")) session = podmanTest.Podman([]string{"run", "--userns=keep-id", "--cap-add=DAC_OVERRIDE", "--rm", ALPINE, "grep", "CapInh", "/proc/self/status"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("0000000000000002")) } }) It("podman run user capabilities test with image", func() { // We need to ignore the containers.conf on the test distribution for this test os.Setenv("CONTAINERS_CONF", "/dev/null") if IsRemote() { podmanTest.RestartRemoteService() } dockerfile := fmt.Sprintf(`FROM %s USER bin`, BB) podmanTest.BuildImage(dockerfile, "test", "false") session := podmanTest.Podman([]string{"run", "--rm", "--user", "bin", "test", "grep", "CapBnd", "/proc/self/status"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("00000000a80425fb")) session = podmanTest.Podman([]string{"run", "--rm", "--user", "bin", "test", "grep", "CapEff", "/proc/self/status"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("0000000000000000")) }) It("podman run limits test", func() { SkipIfRootlessCgroupsV1("Setting limits not supported on 
cgroupv1 for rootless users") if !isRootless() { session := podmanTest.Podman([]string{"run", "--rm", "--ulimit", "rtprio=99", "--cap-add=sys_nice", fedoraMinimal, "cat", "/proc/self/sched"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) } session := podmanTest.Podman([]string{"run", "--rm", "--ulimit", "nofile=2048:2048", fedoraMinimal, "ulimit", "-n"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("2048")) session = podmanTest.Podman([]string{"run", "--rm", "--ulimit", "nofile=1024:1028", fedoraMinimal, "ulimit", "-n"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("1024")) if !CGROUPSV2 { // --oom-kill-disable not supported on cgroups v2. session = podmanTest.Podman([]string{"run", "--rm", "--oom-kill-disable=true", fedoraMinimal, "echo", "memory-hog"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) } session = podmanTest.Podman([]string{"run", "--rm", "--oom-score-adj=111", fedoraMinimal, "cat", "/proc/self/oom_score_adj"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(Equal("111")) }) It("podman run limits host test", func() { SkipIfRemote("This can only be used for local tests") var l syscall.Rlimit err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) Expect(err).To(BeNil()) session := podmanTest.Podman([]string{"run", "--rm", "--ulimit", "host", fedoraMinimal, "ulimit", "-Hn"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) ulimitCtrStr := strings.TrimSpace(session.OutputToString()) ulimitCtr, err := strconv.ParseUint(ulimitCtrStr, 10, 0) Expect(err).To(BeNil()) Expect(ulimitCtr).Should(BeNumerically(">=", l.Max)) }) It("podman run with cidfile", func() { session := podmanTest.Podman([]string{"run", "--cidfile", tempdir + "cidfile", ALPINE, "ls"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) err := os.Remove(tempdir + "cidfile") Expect(err).To(BeNil()) }) It("podman run sysctl test", func() { SkipIfRootless("Network sysctls are not available root rootless") session := podmanTest.Podman([]string{"run", "--rm", "--sysctl", "net.core.somaxconn=65535", ALPINE, "sysctl", "net.core.somaxconn"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("net.core.somaxconn = 65535")) // network sysctls should fail if --net=host is set session = podmanTest.Podman([]string{"run", "--net", "host", "--rm", "--sysctl", "net.core.somaxconn=65535", ALPINE, "sysctl", "net.core.somaxconn"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(125)) }) It("podman run blkio-weight test", func() { SkipIfRootlessCgroupsV1("Setting blkio-weight not supported on cgroupv1 for rootless users") SkipIfRootless("By default systemd doesn't delegate io to rootless users") if CGROUPSV2 { if _, err := os.Stat("/sys/fs/cgroup/io.stat"); os.IsNotExist(err) { Skip("Kernel does not have io.stat") } if _, err := os.Stat("/sys/fs/cgroup/system.slice/io.bfq.weight"); os.IsNotExist(err) { Skip("Kernel does not support BFQ IO scheduler") } session := podmanTest.Podman([]string{"run", "--rm", "--blkio-weight=15", ALPINE, "sh", "-c", "cat /sys/fs/cgroup/io.bfq.weight"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) // there was a documentation issue in the kernel that reported a different range [1-10000] for the io controller. // older versions of crun/runc used it. 
For the time being allow both versions to pass the test. // FIXME: drop "|51" once all the runtimes we test have the fix in place. Expect(strings.Replace(session.OutputToString(), "default ", "", 1)).To(MatchRegexp("15|51")) } else { if _, err := os.Stat("/sys/fs/cgroup/blkio/blkio.weight"); os.IsNotExist(err) { Skip("Kernel does not support blkio.weight") } session := podmanTest.Podman([]string{"run", "--rm", "--blkio-weight=15", ALPINE, "cat", "/sys/fs/cgroup/blkio/blkio.weight"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("15")) } }) It("podman run device-read-bps test", func() { SkipIfRootless("FIXME: requested cgroup controller `io` is not available") SkipIfRootlessCgroupsV1("Setting device-read-bps not supported on cgroupv1 for rootless users") var session *PodmanSessionIntegration if CGROUPSV2 { session = podmanTest.Podman([]string{"run", "--rm", "--device-read-bps=/dev/zero:1mb", ALPINE, "sh", "-c", "cat /sys/fs/cgroup/$(sed -e 's|0::||' < /proc/self/cgroup)/io.max"}) } else { session = podmanTest.Podman([]string{"run", "--rm", "--device-read-bps=/dev/zero:1mb", ALPINE, "cat", "/sys/fs/cgroup/blkio/blkio.throttle.read_bps_device"}) } session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) if !CGROUPSV2 { // TODO: Test Simplification. For now, we only care about exit(0) w/ cgroupsv2 Expect(session.OutputToString()).To(ContainSubstring("1048576")) } }) It("podman run device-write-bps test", func() { SkipIfRootless("FIXME: requested cgroup controller `io` is not available") SkipIfRootlessCgroupsV1("Setting device-write-bps not supported on cgroupv1 for rootless users") var session *PodmanSessionIntegration if CGROUPSV2 { session = podmanTest.Podman([]string{"run", "--rm", "--device-write-bps=/dev/zero:1mb", ALPINE, "sh", "-c", "cat /sys/fs/cgroup/$(sed -e 's|0::||' < /proc/self/cgroup)/io.max"}) } else { session = podmanTest.Podman([]string{"run", "--rm", "--device-write-bps=/dev/zero:1mb", ALPINE, "cat", "/sys/fs/cgroup/blkio/blkio.throttle.write_bps_device"}) } session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) if !CGROUPSV2 { // TODO: Test Simplification. For now, we only care about exit(0) w/ cgroupsv2 Expect(session.OutputToString()).To(ContainSubstring("1048576")) } }) It("podman run device-read-iops test", func() { SkipIfRootless("FIXME: requested cgroup controller `io` is not available") SkipIfRootlessCgroupsV1("Setting device-read-iops not supported on cgroupv1 for rootless users") var session *PodmanSessionIntegration if CGROUPSV2 { session = podmanTest.Podman([]string{"run", "--rm", "--device-read-iops=/dev/zero:100", ALPINE, "sh", "-c", "cat /sys/fs/cgroup/$(sed -e 's|0::||' < /proc/self/cgroup)/io.max"}) } else { session = podmanTest.Podman([]string{"run", "--rm", "--device-read-iops=/dev/zero:100", ALPINE, "cat", "/sys/fs/cgroup/blkio/blkio.throttle.read_iops_device"}) } session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) if !CGROUPSV2 { // TODO: Test Simplification. 
For now, we only care about exit(0) w/ cgroupsv2 Expect(session.OutputToString()).To(ContainSubstring("100")) } }) It("podman run device-write-iops test", func() { SkipIfRootless("FIXME: requested cgroup controller `io` is not available") SkipIfRootlessCgroupsV1("Setting device-write-iops not supported on cgroupv1 for rootless users") var session *PodmanSessionIntegration if CGROUPSV2 { session = podmanTest.Podman([]string{"run", "--rm", "--device-write-iops=/dev/zero:100", ALPINE, "sh", "-c", "cat /sys/fs/cgroup/$(sed -e 's|0::||' < /proc/self/cgroup)/io.max"}) } else { session = podmanTest.Podman([]string{"run", "--rm", "--device-write-iops=/dev/zero:100", ALPINE, "cat", "/sys/fs/cgroup/blkio/blkio.throttle.write_iops_device"}) } session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) if !CGROUPSV2 { // TODO: Test Simplification. For now, we only care about exit(0) w/ cgroupsv2 Expect(session.OutputToString()).To(ContainSubstring("100")) } }) It("podman run notify_socket", func() { SkipIfRemote("This can only be used for local tests") host := GetHostDistributionInfo() if host.Distribution != "rhel" && host.Distribution != "centos" && host.Distribution != "fedora" { Skip("this test requires a working runc") } sock := filepath.Join(podmanTest.TempDir, "notify") addr := net.UnixAddr{ Name: sock, Net: "unixgram", } socket, err := net.ListenUnixgram("unixgram", &addr) Expect(err).To(BeNil()) defer os.Remove(sock) defer socket.Close() os.Setenv("NOTIFY_SOCKET", sock) defer os.Unsetenv("NOTIFY_SOCKET") session := podmanTest.Podman([]string{"run", ALPINE, "printenv", "NOTIFY_SOCKET"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(len(session.OutputToStringArray())).To(BeNumerically(">", 0)) }) It("podman run log-opt", func() { log := filepath.Join(podmanTest.TempDir, "/container.log") session := podmanTest.Podman([]string{"run", "--rm", "--log-driver", "k8s-file", "--log-opt", fmt.Sprintf("path=%s", log), ALPINE, "ls"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) _, err := os.Stat(log) Expect(err).To(BeNil()) _ = os.Remove(log) }) It("podman run tagged image", func() { podmanTest.AddImageToRWStore(BB) tag := podmanTest.Podman([]string{"tag", BB, "bb"}) tag.WaitWithDefaultTimeout() Expect(tag).Should(Exit(0)) session := podmanTest.Podman([]string{"run", "--rm", "bb", "ls"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) }) It("podman test hooks", func() { hcheck := "/run/hookscheck" hooksDir := tempdir + "/hooks" os.Mkdir(hooksDir, 0755) fileutils.CopyFile("hooks/hooks.json", hooksDir) os.Setenv("HOOK_OPTION", fmt.Sprintf("--hooks-dir=%s", hooksDir)) os.Remove(hcheck) session := podmanTest.Podman([]string{"run", ALPINE, "ls"}) session.Wait(10) os.Unsetenv("HOOK_OPTION") Expect(session).Should(Exit(0)) }) It("podman run with subscription secrets", func() { SkipIfRemote("--default-mount-file option is not supported in podman-remote") containersDir := filepath.Join(podmanTest.TempDir, "containers") err := os.MkdirAll(containersDir, 0755) Expect(err).To(BeNil()) secretsDir := filepath.Join(podmanTest.TempDir, "rhel", "secrets") err = os.MkdirAll(secretsDir, 0755) Expect(err).To(BeNil()) mountsFile := filepath.Join(containersDir, "mounts.conf") mountString := secretsDir + ":/run/secrets" err = ioutil.WriteFile(mountsFile, []byte(mountString), 0755) Expect(err).To(BeNil()) secretsFile := filepath.Join(secretsDir, "test.txt") secretsString := "Testing secrets mount. I am mounted!" 
err = ioutil.WriteFile(secretsFile, []byte(secretsString), 0755) Expect(err).To(BeNil()) targetDir := tempdir + "/symlink/target" err = os.MkdirAll(targetDir, 0755) Expect(err).To(BeNil()) keyFile := filepath.Join(targetDir, "key.pem") err = ioutil.WriteFile(keyFile, []byte(mountString), 0755) Expect(err).To(BeNil()) execSession := SystemExec("ln", []string{"-s", targetDir, filepath.Join(secretsDir, "mysymlink")}) Expect(execSession).Should(Exit(0)) session := podmanTest.Podman([]string{"--default-mounts-file=" + mountsFile, "run", "--rm", ALPINE, "cat", "/run/secrets/test.txt"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(Equal(secretsString)) session = podmanTest.Podman([]string{"--default-mounts-file=" + mountsFile, "run", "--rm", ALPINE, "ls", "/run/secrets/mysymlink"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("key.pem")) }) It("podman run with FIPS mode secrets", func() { SkipIfRootless("rootless can not manipulate system-fips file") fipsFile := "/etc/system-fips" err = ioutil.WriteFile(fipsFile, []byte{}, 0755) Expect(err).To(BeNil()) session := podmanTest.Podman([]string{"run", "--rm", ALPINE, "ls", "/run/secrets"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("system-fips")) err = os.Remove(fipsFile) Expect(err).To(BeNil()) }) It("podman run without group-add", func() { session := podmanTest.Podman([]string{"run", "--rm", ALPINE, "id"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(Not(ContainSubstring("27(video),777,65533(nogroup)"))) }) It("podman run with group-add", func() { session := podmanTest.Podman([]string{"run", "--rm", "--group-add=audio", "--group-add=nogroup", "--group-add=777", ALPINE, "id"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("777,65533(nogroup)")) }) It("podman run with user (default)", func() { session := podmanTest.Podman([]string{"run", "--rm", ALPINE, "id"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("uid=0(root) gid=0(root)")) }) It("podman run with user (integer, not in /etc/passwd)", func() { session := podmanTest.Podman([]string{"run", "--rm", "--user=1234", ALPINE, "id"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(Equal("uid=1234(1234) gid=0(root)")) }) It("podman run with user (integer, in /etc/passwd)", func() { session := podmanTest.Podman([]string{"run", "--rm", "--user=8", ALPINE, "id"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("uid=8(mail) gid=12(mail)")) }) It("podman run with user (username)", func() { session := podmanTest.Podman([]string{"run", "--rm", "--user=mail", ALPINE, "id"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("uid=8(mail) gid=12(mail)")) }) It("podman run with user:group (username:integer)", func() { session := podmanTest.Podman([]string{"run", "--rm", "--user=mail:21", ALPINE, "id"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(Equal("uid=8(mail) gid=21(ftp)")) }) It("podman run with user:group (integer:groupname)", func() { session := 
podmanTest.Podman([]string{"run", "--rm", "--user=8:ftp", ALPINE, "id"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(Equal("uid=8(mail) gid=21(ftp)")) }) It("podman run with user, verify caps dropped", func() { session := podmanTest.Podman([]string{"run", "--rm", "--user=1234", ALPINE, "grep", "CapEff", "/proc/self/status"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) capEff := strings.Split(session.OutputToString(), " ") Expect("0000000000000000").To(Equal(capEff[1])) }) It("podman run with attach stdin outputs container ID", func() { session := podmanTest.Podman([]string{"run", "--attach", "stdin", ALPINE, "printenv"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) ps := podmanTest.Podman([]string{"ps", "-aq", "--no-trunc"}) ps.WaitWithDefaultTimeout() Expect(ps).Should(Exit(0)) Expect(ps.OutputToString()).To(ContainSubstring(session.OutputToString())) }) It("podman run with attach stdout does not print stderr", func() { session := podmanTest.Podman([]string{"run", "--rm", "--attach", "stdout", ALPINE, "ls", "/doesnotexist"}) session.WaitWithDefaultTimeout() Expect(session.OutputToString()).To(Equal("")) }) It("podman run with attach stderr does not print stdout", func() { session := podmanTest.Podman([]string{"run", "--rm", "--attach", "stderr", ALPINE, "ls", "/"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(Equal("")) }) It("podman run attach nonsense errors", func() { session := podmanTest.Podman([]string{"run", "--rm", "--attach", "asdfasdf", ALPINE, "ls", "/"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(125)) }) It("podman run exit code on failure to exec", func() { session := podmanTest.Podman([]string{"run", ALPINE, "/etc"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(126)) }) It("podman run error on exec", func() { session := podmanTest.Podman([]string{"run", ALPINE, "sh", "-c", "exit 100"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(100)) }) It("podman run with named volume", func() { session := podmanTest.Podman([]string{"run", "--rm", ALPINE, "stat", "-c", "%a %Y", "/var/tmp"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) perms := session.OutputToString() session = podmanTest.Podman([]string{"run", "--rm", "-v", "test:/var/tmp", ALPINE, "stat", "-c", "%a %Y", "/var/tmp"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(Equal(perms)) }) It("podman run with built-in volume image", func() { session := podmanTest.Podman([]string{"run", "--rm", redis, "ls"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) dockerfile := fmt.Sprintf(`FROM %s RUN mkdir -p /myvol/data && chown -R mail.0 /myvol VOLUME ["/myvol/data"] USER mail`, BB) podmanTest.BuildImage(dockerfile, "test", "false") session = podmanTest.Podman([]string{"run", "--rm", "test", "ls", "-al", "/myvol/data"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("mail root")) }) It("podman run --volumes-from flag", func() { vol := filepath.Join(podmanTest.TempDir, "vol-test") err := os.MkdirAll(vol, 0755) Expect(err).To(BeNil()) filename := "test.txt" volFile := filepath.Join(vol, filename) data := "Testing --volumes-from!!!" 
err = ioutil.WriteFile(volFile, []byte(data), 0755) Expect(err).To(BeNil()) mountpoint := "/myvol/" session := podmanTest.Podman([]string{"create", "--volume", vol + ":" + mountpoint + ":z", ALPINE, "cat", mountpoint + filename}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) ctrID := session.OutputToString() session = podmanTest.Podman([]string{"run", "--volumes-from", ctrID, ALPINE, "cat", mountpoint + filename}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(Equal(data)) session = podmanTest.Podman([]string{"run", "--volumes-from", ctrID, ALPINE, "sh", "-c", "echo test >> " + mountpoint + filename}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) session = podmanTest.Podman([]string{"start", "--attach", ctrID}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(Equal(data + "test")) }) It("podman run --volumes-from flag options", func() { vol := filepath.Join(podmanTest.TempDir, "vol-test") err := os.MkdirAll(vol, 0755) Expect(err).To(BeNil()) filename := "test.txt" volFile := filepath.Join(vol, filename) data := "Testing --volumes-from!!!" err = ioutil.WriteFile(volFile, []byte(data), 0755) Expect(err).To(BeNil()) mountpoint := "/myvol/" session := podmanTest.Podman([]string{"create", "--volume", vol + ":" + mountpoint, ALPINE, "cat", mountpoint + filename}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) ctrID := session.OutputToString() // check that the read only option works session = podmanTest.Podman([]string{"run", "--volumes-from", ctrID + ":ro", ALPINE, "touch", mountpoint + "abc.txt"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(1)) Expect(session.ErrorToString()).To(ContainSubstring("Read-only file system")) // check that both z and ro options work session = podmanTest.Podman([]string{"run", "--volumes-from", ctrID + ":ro,z", ALPINE, "cat", mountpoint + filename}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(Equal(data)) // check that multiple ro/rw are not working session = podmanTest.Podman([]string{"run", "--volumes-from", ctrID + ":ro,rw", ALPINE, "cat", mountpoint + filename}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(125)) Expect(session.ErrorToString()).To(ContainSubstring("cannot set ro or rw options more than once")) // check that multiple z options are not working session = podmanTest.Podman([]string{"run", "--volumes-from", ctrID + ":z,z,ro", ALPINE, "cat", mountpoint + filename}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(125)) Expect(session.ErrorToString()).To(ContainSubstring("cannot set :z more than once in mount options")) // create new read only volume session = podmanTest.Podman([]string{"create", "--volume", vol + ":" + mountpoint + ":ro", ALPINE, "cat", mountpoint + filename}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) ctrID = session.OutputToString() // check if the original volume was mounted as read only that --volumes-from also mount it as read only session = podmanTest.Podman([]string{"run", "--volumes-from", ctrID, ALPINE, "touch", mountpoint + "abc.txt"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(1)) Expect(session.ErrorToString()).To(ContainSubstring("Read-only file system")) }) It("podman run --volumes-from flag with built-in volumes", func() { session := podmanTest.Podman([]string{"create", redis, "sh"}) session.WaitWithDefaultTimeout() 
Expect(session).Should(Exit(0)) ctrID := session.OutputToString() session = podmanTest.Podman([]string{"run", "--volumes-from", ctrID, ALPINE, "ls"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("data")) }) It("podman run --volumes flag with multiple volumes", func() { vol1 := filepath.Join(podmanTest.TempDir, "vol-test1") err := os.MkdirAll(vol1, 0755) Expect(err).To(BeNil()) vol2 := filepath.Join(podmanTest.TempDir, "vol-test2") err = os.MkdirAll(vol2, 0755) Expect(err).To(BeNil()) session := podmanTest.Podman([]string{"run", "--volume", vol1 + ":/myvol1:z", "--volume", vol2 + ":/myvol2:z", ALPINE, "touch", "/myvol2/foo.txt"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) }) It("podman run --volumes flag with empty host dir", func() { vol1 := filepath.Join(podmanTest.TempDir, "vol-test1") err := os.MkdirAll(vol1, 0755) Expect(err).To(BeNil()) session := podmanTest.Podman([]string{"run", "--volume", ":/myvol1:z", ALPINE, "touch", "/myvol2/foo.txt"}) session.WaitWithDefaultTimeout() Expect(session).To(ExitWithError()) Expect(session.ErrorToString()).To(ContainSubstring("directory cannot be empty")) session = podmanTest.Podman([]string{"run", "--volume", vol1 + ":", ALPINE, "touch", "/myvol2/foo.txt"}) session.WaitWithDefaultTimeout() Expect(session).To(ExitWithError()) Expect(session.ErrorToString()).To(ContainSubstring("directory cannot be empty")) }) It("podman run --mount flag with multiple mounts", func() { vol1 := filepath.Join(podmanTest.TempDir, "vol-test1") err := os.MkdirAll(vol1, 0755) Expect(err).To(BeNil()) vol2 := filepath.Join(podmanTest.TempDir, "vol-test2") err = os.MkdirAll(vol2, 0755) Expect(err).To(BeNil()) session := podmanTest.Podman([]string{"run", "--mount", "type=bind,src=" + vol1 + ",target=/myvol1,z", "--mount", "type=bind,src=" + vol2 + ",target=/myvol2,z", ALPINE, "touch", "/myvol2/foo.txt"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) }) It("podman run findmnt nothing shared", func() { vol1 := filepath.Join(podmanTest.TempDir, "vol-test1") err := os.MkdirAll(vol1, 0755) Expect(err).To(BeNil()) vol2 := filepath.Join(podmanTest.TempDir, "vol-test2") err = os.MkdirAll(vol2, 0755) Expect(err).To(BeNil()) session := podmanTest.Podman([]string{"run", "--volume", vol1 + ":/myvol1:z", "--volume", vol2 + ":/myvol2:z", fedoraMinimal, "findmnt", "-o", "TARGET,PROPAGATION"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(Not(ContainSubstring("shared"))) }) It("podman run findmnt shared", func() { vol1 := filepath.Join(podmanTest.TempDir, "vol-test1") err := os.MkdirAll(vol1, 0755) Expect(err).To(BeNil()) vol2 := filepath.Join(podmanTest.TempDir, "vol-test2") err = os.MkdirAll(vol2, 0755) Expect(err).To(BeNil()) session := podmanTest.Podman([]string{"run", "--volume", vol1 + ":/myvol1:z", "--volume", vol2 + ":/myvol2:shared,z", fedoraMinimal, "findmnt", "-o", "TARGET,PROPAGATION"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) match, shared := session.GrepString("shared") Expect(match).Should(BeTrue()) // make sure it's only shared (and not 'shared,slave') isSharedOnly := !strings.Contains(shared[0], "shared,") Expect(isSharedOnly).Should(BeTrue()) }) It("podman run --security-opts proc-opts=", func() { session := podmanTest.Podman([]string{"run", "--security-opt", "proc-opts=nosuid,exec", fedoraMinimal, "findmnt", "-noOPTIONS", "/proc"}) session.WaitWithDefaultTimeout() 
Expect(session).Should(Exit(0)) output := session.OutputToString() Expect(output).To(ContainSubstring("nosuid")) Expect(output).To(Not(ContainSubstring("exec"))) }) It("podman run --mount type=bind,bind-nonrecursive", func() { SkipIfRootless("FIXME: rootless users are not allowed to mount bind-nonrecursive (Could this be a Kernel bug?") session := podmanTest.Podman([]string{"run", "--mount", "type=bind,bind-nonrecursive,slave,src=/,target=/host", fedoraMinimal, "findmnt", "-nR", "/host"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToStringArray()).To(HaveLen(1)) }) It("podman run --mount type=devpts,target=/foo/bar", func() { session := podmanTest.Podman([]string{"run", "--mount", "type=devpts,target=/foo/bar", fedoraMinimal, "stat", "-f", "-c%T", "/foo/bar"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("devpts")) }) It("podman run --mount type=devpts,target=/dev/pts with uid, gid and mode", func() { // runc doesn't seem to honor uid= so avoid testing it session := podmanTest.Podman([]string{"run", "-t", "--mount", "type=devpts,target=/dev/pts,uid=1000,gid=1001,mode=123", fedoraMinimal, "stat", "-c%g-%a", "/dev/pts/0"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("1001-123")) }) It("podman run --pod automatically", func() { session := podmanTest.Podman([]string{"run", "-d", "--pod", "new:foobar", ALPINE, "nc", "-l", "-p", "8686"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) session = podmanTest.Podman([]string{"run", "--pod", "foobar", ALPINE, "/bin/sh", "-c", "echo test | nc -w 1 127.0.0.1 8686"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) check := podmanTest.Podman([]string{"pod", "ps", "--no-trunc"}) check.WaitWithDefaultTimeout() Expect(check.OutputToString()).To(ContainSubstring("foobar")) }) It("podman run --pod new with hostname", func() { hostname := "abc" session := podmanTest.Podman([]string{"run", "--pod", "new:foobar", "--hostname", hostname, ALPINE, "cat", "/etc/hostname"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring(hostname)) }) It("podman run --rm should work", func() { session := podmanTest.Podman([]string{"run", "--name", "test", "--rm", ALPINE, "ls"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) session = podmanTest.Podman([]string{"wait", "test"}) session.WaitWithDefaultTimeout() Expect(session).To(ExitWithError()) numContainers := podmanTest.NumberOfContainers() Expect(numContainers).To(Equal(0)) }) It("podman run --rm failed container should delete itself", func() { session := podmanTest.Podman([]string{"run", "--name", "test", "--rm", ALPINE, "foo"}) session.WaitWithDefaultTimeout() Expect(session).To(ExitWithError()) session = podmanTest.Podman([]string{"wait", "test"}) session.WaitWithDefaultTimeout() Expect(session).To(ExitWithError()) numContainers := podmanTest.NumberOfContainers() Expect(numContainers).To(Equal(0)) }) It("podman run failed container should NOT delete itself", func() { session := podmanTest.Podman([]string{"run", ALPINE, "foo"}) session.WaitWithDefaultTimeout() Expect(session).To(ExitWithError()) // If remote we could have a race condition session = podmanTest.Podman([]string{"wait", "test"}) session.WaitWithDefaultTimeout() Expect(session).To(ExitWithError()) numContainers := podmanTest.NumberOfContainers() 
Expect(numContainers).To(Equal(1)) }) It("podman run readonly container should NOT mount /dev/shm read/only", func() { session := podmanTest.Podman([]string{"run", "--read-only", ALPINE, "mount"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(Not(ContainSubstring("/dev/shm type tmpfs (ro,"))) }) It("podman run readonly container should NOT mount /run noexec", func() { session := podmanTest.Podman([]string{"run", "--read-only", ALPINE, "sh", "-c", "mount | grep \"/run \""}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(Not(ContainSubstring("noexec"))) }) It("podman run with bad healthcheck retries", func() { session := podmanTest.Podman([]string{"run", "-dt", "--health-cmd", "[\"foo\"]", "--health-retries", "0", ALPINE, "top"}) session.Wait() Expect(session).To(ExitWithError()) Expect(session.ErrorToString()).To(ContainSubstring("healthcheck-retries must be greater than 0")) }) It("podman run with bad healthcheck timeout", func() { session := podmanTest.Podman([]string{"run", "-dt", "--health-cmd", "foo", "--health-timeout", "0s", ALPINE, "top"}) session.WaitWithDefaultTimeout() Expect(session).To(ExitWithError()) Expect(session.ErrorToString()).To(ContainSubstring("healthcheck-timeout must be at least 1 second")) }) It("podman run with bad healthcheck start-period", func() { session := podmanTest.Podman([]string{"run", "-dt", "--health-cmd", "foo", "--health-start-period", "-1s", ALPINE, "top"}) session.WaitWithDefaultTimeout() Expect(session).To(ExitWithError()) Expect(session.ErrorToString()).To(ContainSubstring("healthcheck-start-period must be 0 seconds or greater")) }) It("podman run with --add-host and --no-hosts fails", func() { session := podmanTest.Podman([]string{"run", "-dt", "--add-host", "test1:127.0.0.1", "--no-hosts", ALPINE, "top"}) session.WaitWithDefaultTimeout() Expect(session).To(ExitWithError()) }) It("podman run with restart-policy always restarts containers", func() { testDir := filepath.Join(podmanTest.RunRoot, "restart-test") err := os.MkdirAll(testDir, 0755) Expect(err).To(BeNil()) aliveFile := filepath.Join(testDir, "running") file, err := os.Create(aliveFile) Expect(err).To(BeNil()) file.Close() session := podmanTest.Podman([]string{"run", "-dt", "--restart", "always", "-v", fmt.Sprintf("%s:/tmp/runroot:Z", testDir), ALPINE, "sh", "-c", "touch /tmp/runroot/ran && while test -r /tmp/runroot/running; do sleep 0.1s; done"}) found := false testFile := filepath.Join(testDir, "ran") for i := 0; i < 30; i++ { time.Sleep(1 * time.Second) if _, err := os.Stat(testFile); err == nil { found = true err = os.Remove(testFile) Expect(err).To(BeNil()) break } } Expect(found).To(BeTrue()) err = os.Remove(aliveFile) Expect(err).To(BeNil()) session.WaitWithDefaultTimeout() // 10 seconds to restart the container found = false for i := 0; i < 10; i++ { time.Sleep(1 * time.Second) if _, err := os.Stat(testFile); err == nil { found = true break } } Expect(found).To(BeTrue()) }) It("podman run with cgroups=split", func() { SkipIfNotSystemd(podmanTest.CgroupManager, "do not test --cgroups=split if not running on systemd") SkipIfRootlessCgroupsV1("Disable cgroups not supported on cgroupv1 for rootless users") SkipIfRemote("--cgroups=split cannot be used in remote mode") checkLines := func(lines []string) { cgroup := "" for _, line := range lines { parts := strings.SplitN(line, ":", 3) if len(parts) < 2 { continue } if !CGROUPSV2 { // ignore unified on cgroup v1. 
// both runc and crun do not set it. // crun does not set named hierarchies. if parts[1] == "" || strings.Contains(parts[1], "name=") { continue } } if parts[2] == "/" { continue } if cgroup == "" { cgroup = parts[2] continue } Expect(cgroup).To(Equal(parts[2])) } } container := podmanTest.PodmanSystemdScope([]string{"run", "--rm", "--cgroups=split", ALPINE, "cat", "/proc/self/cgroup"}) container.WaitWithDefaultTimeout() Expect(container).Should(Exit(0)) checkLines(container.OutputToStringArray()) // check that --cgroups=split is honored also when a container runs in a pod container = podmanTest.PodmanSystemdScope([]string{"run", "--rm", "--pod", "new:split-test-pod", "--cgroups=split", ALPINE, "cat", "/proc/self/cgroup"}) container.WaitWithDefaultTimeout() Expect(container).Should(Exit(0)) checkLines(container.OutputToStringArray()) }) It("podman run with cgroups=disabled runs without cgroups", func() { SkipIfRootlessCgroupsV1("Disable cgroups not supported on cgroupv1 for rootless users") // Only works on crun if !strings.Contains(podmanTest.OCIRuntime, "crun") { Skip("Test only works on crun") } ownsCgroup, err := cgroups.UserOwnsCurrentSystemdCgroup() Expect(err).ShouldNot(HaveOccurred()) if !ownsCgroup { // Podman moves itself to a new cgroup if it doesn't own the current cgroup Skip("Test only works when Podman owns the current cgroup") } trim := func(i string) string { return strings.TrimSuffix(i, "\n") } curCgroupsBytes, err := ioutil.ReadFile("/proc/self/cgroup") Expect(err).ShouldNot(HaveOccurred()) curCgroups := trim(string(curCgroupsBytes)) fmt.Printf("Output:\n%s\n", curCgroups) Expect(curCgroups).ToNot(Equal("")) container := podmanTest.Podman([]string{"run", "--cgroupns=host", "--cgroups=disabled", ALPINE, "cat", "/proc/self/cgroup"}) container.WaitWithDefaultTimeout() Expect(container).Should(Exit(0)) ctrCgroups := trim(container.OutputToString()) fmt.Printf("Output\n:%s\n", ctrCgroups) Expect(ctrCgroups).To(Equal(curCgroups)) }) It("podman run with cgroups=enabled makes cgroups", func() { SkipIfRootlessCgroupsV1("Enable cgroups not supported on cgroupv1 for rootless users") // Only works on crun if !strings.Contains(podmanTest.OCIRuntime, "crun") { Skip("Test only works on crun") } curCgroupsBytes, err := ioutil.ReadFile("/proc/self/cgroup") Expect(err).To(BeNil()) var curCgroups string = string(curCgroupsBytes) fmt.Printf("Output:\n%s\n", curCgroups) Expect(curCgroups).To(Not(Equal(""))) ctrName := "testctr" container := podmanTest.Podman([]string{"run", "--name", ctrName, "-d", "--cgroups=enabled", ALPINE, "top"}) container.WaitWithDefaultTimeout() Expect(container).Should(Exit(0)) // Get PID and get cgroups of that PID inspectOut := podmanTest.InspectContainer(ctrName) Expect(inspectOut).To(HaveLen(1)) pid := inspectOut[0].State.Pid Expect(pid).To(Not(Equal(0))) ctrCgroupsBytes, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/cgroup", pid)) Expect(err).To(BeNil()) var ctrCgroups string = string(ctrCgroupsBytes) fmt.Printf("Output\n:%s\n", ctrCgroups) Expect(curCgroups).To(Not(Equal(ctrCgroups))) }) It("podman run with cgroups=garbage errors", func() { session := podmanTest.Podman([]string{"run", "-d", "--cgroups=garbage", ALPINE, "top"}) session.WaitWithDefaultTimeout() Expect(session).To(ExitWithError()) }) It("podman run should fail with nonexistent authfile", func() { session := podmanTest.Podman([]string{"run", "--authfile", "/tmp/nonexistent", ALPINE, "ls"}) session.WaitWithDefaultTimeout() Expect(session).To(ExitWithError()) }) It("podman run --device-cgroup-rule", 
func() { SkipIfRootless("rootless users are not allowed to mknod") deviceCgroupRule := "c 42:* rwm" session := podmanTest.Podman([]string{"run", "--cap-add", "mknod", "--name", "test", "-d", "--device-cgroup-rule", deviceCgroupRule, ALPINE, "top"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) session = podmanTest.Podman([]string{"exec", "test", "mknod", "newDev", "c", "42", "1"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) }) It("podman run --replace", func() { // Make sure we error out with --name. session := podmanTest.Podman([]string{"create", "--replace", ALPINE, "/bin/sh"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(125)) // Run and replace 5 times in a row the "same" container. ctrName := "testCtr" for i := 0; i < 5; i++ { session := podmanTest.Podman([]string{"run", "--detach", "--replace", "--name", ctrName, ALPINE, "/bin/sh"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) } }) It("podman run --preserve-fds", func() { devNull, err := os.Open("/dev/null") Expect(err).To(BeNil()) defer devNull.Close() files := []*os.File{ devNull, } session := podmanTest.PodmanExtraFiles([]string{"run", "--preserve-fds", "1", ALPINE, "ls"}, files) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) }) It("podman run --preserve-fds invalid fd", func() { session := podmanTest.Podman([]string{"run", "--preserve-fds", "2", ALPINE}) session.WaitWithDefaultTimeout() Expect(session).To(ExitWithError()) Expect(session.ErrorToString()).To(ContainSubstring("file descriptor 3 is not available")) }) It("podman run --privileged and --group-add", func() { groupName := "mail" session := podmanTest.Podman([]string{"run", "-t", "-i", "--group-add", groupName, "--privileged", fedoraMinimal, "groups"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring(groupName)) }) It("podman run --tz", func() { testDir := filepath.Join(podmanTest.RunRoot, "tz-test") err := os.MkdirAll(testDir, 0755) Expect(err).To(BeNil()) tzFile := filepath.Join(testDir, "tzfile.txt") file, err := os.Create(tzFile) Expect(err).To(BeNil()) _, err = file.WriteString("Hello") Expect(err).To(BeNil()) file.Close() badTZFile := fmt.Sprintf("../../../%s", tzFile) session := podmanTest.Podman([]string{"run", "--tz", badTZFile, "--rm", ALPINE, "date"}) session.WaitWithDefaultTimeout() Expect(session).To(ExitWithError()) Expect(session.ErrorToString()).To(ContainSubstring("error finding timezone for container")) err = os.Remove(tzFile) Expect(err).To(BeNil()) session = podmanTest.Podman([]string{"run", "--tz", "foo", "--rm", ALPINE, "date"}) session.WaitWithDefaultTimeout() Expect(session).To(ExitWithError()) session = podmanTest.Podman([]string{"run", "--tz", "America", "--rm", ALPINE, "date"}) session.WaitWithDefaultTimeout() Expect(session).To(ExitWithError()) session = podmanTest.Podman([]string{"run", "--tz", "Pacific/Honolulu", "--rm", ALPINE, "date", "+'%H %Z'"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("HST")) session = podmanTest.Podman([]string{"run", "--tz", "local", "--rm", ALPINE, "date", "+'%H %Z'"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) t := time.Now() z, _ := t.Zone() h := strconv.Itoa(t.Hour()) Expect(session.OutputToString()).To(ContainSubstring(z)) Expect(session.OutputToString()).To(ContainSubstring(h)) }) It("podman run verify pids-limit", func() { SkipIfCgroupV1("pids-limit not 
supported on cgroup V1") limit := "4321" session := podmanTest.Podman([]string{"run", "--pids-limit", limit, "--net=none", "--rm", ALPINE, "cat", "/sys/fs/cgroup/pids.max"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring(limit)) }) It("podman run umask", func() { if !strings.Contains(podmanTest.OCIRuntime, "crun") { Skip("Test only works on crun") } session := podmanTest.Podman([]string{"run", "--rm", ALPINE, "sh", "-c", "umask"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(Equal("0022")) session = podmanTest.Podman([]string{"run", "--umask", "0002", "--rm", ALPINE, "sh", "-c", "umask"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(Equal("0002")) session = podmanTest.Podman([]string{"run", "--umask", "0077", "--rm", fedoraMinimal, "umask"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(Equal("0077")) session = podmanTest.Podman([]string{"run", "--umask", "22", "--rm", ALPINE, "sh", "-c", "umask"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(Equal("0022")) session = podmanTest.Podman([]string{"run", "--umask", "9999", "--rm", ALPINE, "sh", "-c", "umask"}) session.WaitWithDefaultTimeout() Expect(session).To(ExitWithError()) Expect(session.ErrorToString()).To(ContainSubstring("Invalid umask")) }) It("podman run makes workdir from image", func() { // BuildImage does not seem to work remote dockerfile := fmt.Sprintf(`FROM %s WORKDIR /madethis`, BB) podmanTest.BuildImage(dockerfile, "test", "false") session := podmanTest.Podman([]string{"run", "--rm", "test", "pwd"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("/madethis")) }) It("podman run --entrypoint does not use image command", func() { session := podmanTest.Podman([]string{"run", "--entrypoint", "/bin/echo", ALPINE}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) // We can't guarantee the output is completely empty, some // nonprintables seem to work their way in. 
Expect(session.OutputToString()).To(Not(ContainSubstring("/bin/sh"))) }) It("podman run a container with log-level (lower case)", func() { session := podmanTest.Podman([]string{"--log-level=info", "run", ALPINE, "ls"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) }) It("podman run a container with log-level (upper case)", func() { session := podmanTest.Podman([]string{"--log-level=INFO", "run", ALPINE, "ls"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) }) It("podman run a container with --pull never should fail if no local store", func() { session := podmanTest.Podman([]string{"run", "--pull", "never", "docker.io/library/debian:latest", "ls"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(125)) }) It("podman run container with --pull missing and only pull once", func() { session := podmanTest.Podman([]string{"run", "--pull", "missing", cirros, "ls"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.ErrorToString()).To(ContainSubstring("Trying to pull")) session = podmanTest.Podman([]string{"run", "--pull", "missing", cirros, "ls"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.ErrorToString()).ToNot(ContainSubstring("Trying to pull")) }) It("podman run container with --pull missing should pull image multiple times", func() { session := podmanTest.Podman([]string{"run", "--pull", "always", cirros, "ls"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.ErrorToString()).To(ContainSubstring("Trying to pull")) session = podmanTest.Podman([]string{"run", "--pull", "always", cirros, "ls"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.ErrorToString()).To(ContainSubstring("Trying to pull")) }) It("podman run container with hostname and hostname environment variable", func() { hostnameEnv := "test123" session := podmanTest.Podman([]string{"run", "--hostname", "testctr", "--env", fmt.Sprintf("HOSTNAME=%s", hostnameEnv), ALPINE, "printenv", "HOSTNAME"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring(hostnameEnv)) }) It("podman run --secret", func() { secretsString := "somesecretdata" secretFilePath := filepath.Join(podmanTest.TempDir, "secret") err := ioutil.WriteFile(secretFilePath, []byte(secretsString), 0755) Expect(err).To(BeNil()) session := podmanTest.Podman([]string{"secret", "create", "mysecret", secretFilePath}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) session = podmanTest.Podman([]string{"run", "--secret", "mysecret", "--name", "secr", ALPINE, "cat", "/run/secrets/mysecret"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(Equal(secretsString)) session = podmanTest.Podman([]string{"inspect", "secr", "--format", " {{(index .Config.Secrets 0).Name}}"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("mysecret")) }) It("podman run --secret source=mysecret,type=mount", func() { secretsString := "somesecretdata" secretFilePath := filepath.Join(podmanTest.TempDir, "secret") err := ioutil.WriteFile(secretFilePath, []byte(secretsString), 0755) Expect(err).To(BeNil()) session := podmanTest.Podman([]string{"secret", "create", "mysecret", secretFilePath}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) session = podmanTest.Podman([]string{"run", "--secret", "source=mysecret,type=mount", 
"--name", "secr", ALPINE, "cat", "/run/secrets/mysecret"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(Equal(secretsString)) session = podmanTest.Podman([]string{"inspect", "secr", "--format", " {{(index .Config.Secrets 0).Name}}"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("mysecret")) }) It("podman run --secret source=mysecret,type=mount with target", func() { secretsString := "somesecretdata" secretFilePath := filepath.Join(podmanTest.TempDir, "secret") err := ioutil.WriteFile(secretFilePath, []byte(secretsString), 0755) Expect(err).To(BeNil()) session := podmanTest.Podman([]string{"secret", "create", "mysecret_target", secretFilePath}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) session = podmanTest.Podman([]string{"run", "--secret", "source=mysecret_target,type=mount,target=hello", "--name", "secr_target", ALPINE, "cat", "/run/secrets/hello"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(Equal(secretsString)) session = podmanTest.Podman([]string{"inspect", "secr_target", "--format", " {{(index .Config.Secrets 0).Name}}"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("mysecret_target")) }) It("podman run --secret source=mysecret,type=mount with target at /tmp", func() { secretsString := "somesecretdata" secretFilePath := filepath.Join(podmanTest.TempDir, "secret") err := ioutil.WriteFile(secretFilePath, []byte(secretsString), 0755) Expect(err).To(BeNil()) session := podmanTest.Podman([]string{"secret", "create", "mysecret_target2", secretFilePath}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) session = podmanTest.Podman([]string{"run", "--secret", "source=mysecret_target2,type=mount,target=/tmp/hello", "--name", "secr_target2", ALPINE, "cat", "/tmp/hello"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(Equal(secretsString)) session = podmanTest.Podman([]string{"inspect", "secr_target2", "--format", " {{(index .Config.Secrets 0).Name}}"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("mysecret_target2")) }) It("podman run --secret source=mysecret,type=env", func() { secretsString := "somesecretdata" secretFilePath := filepath.Join(podmanTest.TempDir, "secret") err := ioutil.WriteFile(secretFilePath, []byte(secretsString), 0755) Expect(err).To(BeNil()) session := podmanTest.Podman([]string{"secret", "create", "mysecret", secretFilePath}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) session = podmanTest.Podman([]string{"run", "--secret", "source=mysecret,type=env", "--name", "secr", ALPINE, "printenv", "mysecret"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(Equal(secretsString)) }) It("podman run --secret target option", func() { secretsString := "somesecretdata" secretFilePath := filepath.Join(podmanTest.TempDir, "secret") err := ioutil.WriteFile(secretFilePath, []byte(secretsString), 0755) Expect(err).To(BeNil()) session := podmanTest.Podman([]string{"secret", "create", "mysecret", secretFilePath}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) session = podmanTest.Podman([]string{"run", "--secret", "source=mysecret,type=env,target=anotherplace", "--name", "secr", 
ALPINE, "printenv", "anotherplace"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(Equal(secretsString)) }) It("podman run --secret mount with uid, gid, mode options", func() { secretsString := "somesecretdata" secretFilePath := filepath.Join(podmanTest.TempDir, "secret") err := ioutil.WriteFile(secretFilePath, []byte(secretsString), 0755) Expect(err).To(BeNil()) session := podmanTest.Podman([]string{"secret", "create", "mysecret", secretFilePath}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) // check default permissions session = podmanTest.Podman([]string{"run", "--secret", "mysecret", "--name", "secr", ALPINE, "ls", "-l", "/run/secrets/mysecret"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) output := session.OutputToString() Expect(output).To(ContainSubstring("-r--r--r--")) Expect(output).To(ContainSubstring("root")) session = podmanTest.Podman([]string{"run", "--secret", "source=mysecret,type=mount,uid=1000,gid=1001,mode=777", "--name", "secr2", ALPINE, "ls", "-ln", "/run/secrets/mysecret"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) output = session.OutputToString() Expect(output).To(ContainSubstring("-rwxrwxrwx")) Expect(output).To(ContainSubstring("1000")) Expect(output).To(ContainSubstring("1001")) }) It("podman run --secret with --user", func() { secretsString := "somesecretdata" secretFilePath := filepath.Join(podmanTest.TempDir, "secret") err := ioutil.WriteFile(secretFilePath, []byte(secretsString), 0755) Expect(err).To(BeNil()) session := podmanTest.Podman([]string{"secret", "create", "mysecret", secretFilePath}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) session = podmanTest.Podman([]string{"run", "--secret", "mysecret", "--name", "nonroot", "--user", "200:200", ALPINE, "cat", "/run/secrets/mysecret"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(Equal(secretsString)) }) It("podman run invalid secret option", func() { secretsString := "somesecretdata" secretFilePath := filepath.Join(podmanTest.TempDir, "secret") err := ioutil.WriteFile(secretFilePath, []byte(secretsString), 0755) Expect(err).To(BeNil()) session := podmanTest.Podman([]string{"secret", "create", "mysecret", secretFilePath}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) // Invalid type session = podmanTest.Podman([]string{"run", "--secret", "source=mysecret,type=other", "--name", "secr", ALPINE, "printenv", "mysecret"}) session.WaitWithDefaultTimeout() Expect(session).To(ExitWithError()) // Invalid option session = podmanTest.Podman([]string{"run", "--secret", "source=mysecret,invalid=invalid", "--name", "secr", ALPINE, "printenv", "mysecret"}) session.WaitWithDefaultTimeout() Expect(session).To(ExitWithError()) // Option syntax not valid session = podmanTest.Podman([]string{"run", "--secret", "source=mysecret,type", "--name", "secr", ALPINE, "printenv", "mysecret"}) session.WaitWithDefaultTimeout() Expect(session).To(ExitWithError()) // mount option with env type session = podmanTest.Podman([]string{"run", "--secret", "source=mysecret,type=env,uid=1000", "--name", "secr", ALPINE, "printenv", "mysecret"}) session.WaitWithDefaultTimeout() Expect(session).To(ExitWithError()) // No source given session = podmanTest.Podman([]string{"run", "--secret", "type=env", "--name", "secr", ALPINE, "printenv", "mysecret"}) session.WaitWithDefaultTimeout() Expect(session).To(ExitWithError()) }) It("podman run 
--requires", func() { depName := "ctr1" depContainer := podmanTest.Podman([]string{"create", "--name", depName, ALPINE, "top"}) depContainer.WaitWithDefaultTimeout() Expect(depContainer).Should(Exit(0)) mainName := "ctr2" mainContainer := podmanTest.Podman([]string{"run", "--name", mainName, "--requires", depName, "-d", ALPINE, "top"}) mainContainer.WaitWithDefaultTimeout() Expect(mainContainer).Should(Exit(0)) stop := podmanTest.Podman([]string{"stop", "--all"}) stop.WaitWithDefaultTimeout() Expect(stop).Should(Exit(0)) start := podmanTest.Podman([]string{"start", mainName}) start.WaitWithDefaultTimeout() Expect(start).Should(Exit(0)) running := podmanTest.Podman([]string{"ps", "-q"}) running.WaitWithDefaultTimeout() Expect(running).Should(Exit(0)) Expect(running.OutputToStringArray()).To(HaveLen(2)) }) It("podman run with pidfile", func() { SkipIfRemote("pidfile not handled by remote") pidfile := tempdir + "pidfile" session := podmanTest.Podman([]string{"run", "--pidfile", pidfile, ALPINE, "ls"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) readFirstLine := func(path string) string { content, err := ioutil.ReadFile(path) Expect(err).To(BeNil()) return strings.Split(string(content), "\n")[0] } containerPID := readFirstLine(pidfile) _, err = strconv.Atoi(containerPID) // Make sure it's a proper integer Expect(err).To(BeNil()) }) It("podman run check personality support", func() { // TODO: Remove this as soon as this is merged and made available in our CI https://github.com/opencontainers/runc/pull/3126. if !strings.Contains(podmanTest.OCIRuntime, "crun") { Skip("Test only works on crun") } session := podmanTest.Podman([]string{"run", "--personality=LINUX32", "--name=testpersonality", ALPINE, "uname", "-a"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.OutputToString()).To(ContainSubstring("i686")) }) It("podman run /dev/shm has nosuid,noexec,nodev", func() { session := podmanTest.Podman([]string{"run", ALPINE, "grep", "/dev/shm", "/proc/self/mountinfo"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) output := session.OutputToString() Expect(output).To(ContainSubstring("nosuid")) Expect(output).To(ContainSubstring("noexec")) Expect(output).To(ContainSubstring("nodev")) }) })
[ "\"container\"", "\"SKIP_USERNS\"" ]
[]
[ "container", "SKIP_USERNS" ]
[]
["container", "SKIP_USERNS"]
go
2
0
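The podman e2e tests in the row above all follow the same shape: spawn the CLI through the test harness, wait for it, then assert on the exit code and the captured output. As a rough, self-contained illustration of that spawn/wait/expect pattern (not the podmanTest harness itself; the package name, the TestRunAndAssert function, the use of plain testing with Gomega's gexec helpers, and the "ls /" command are all illustrative assumptions):

package cli_test

import (
    "os/exec"
    "testing"

    . "github.com/onsi/gomega"
    "github.com/onsi/gomega/gexec"
)

// TestRunAndAssert starts an external command, waits for it to exit, and
// then asserts on its exit code and captured stdout -- the same shape as
// Expect(session).Should(Exit(0)) / OutputToString() in the tests above.
func TestRunAndAssert(t *testing.T) {
    g := NewWithT(t)

    // gexec.Start captures stdout/stderr so they can be inspected later.
    session, err := gexec.Start(exec.Command("ls", "/"), nil, nil)
    g.Expect(err).NotTo(HaveOccurred())

    // Block until the process exits, then check exit code and output.
    session.Wait()
    g.Expect(session).To(gexec.Exit(0))
    g.Expect(string(session.Out.Contents())).To(ContainSubstring("etc"))
}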
selfdrive/updated.py
#!/usr/bin/env python3 # Safe Update: A simple service that waits for network access and tries to # update every 10 minutes. It's intended to make the OP update process more # robust against Git repository corruption. This service DOES NOT try to fix # an already-corrupt BASEDIR Git repo, only prevent it from happening. # # During normal operation, both onroad and offroad, the update process makes # no changes to the BASEDIR install of OP. All update attempts are performed # in a disposable staging area provided by OverlayFS. It assumes the deleter # process provides enough disk space to carry out the process. # # If an update succeeds, a flag is set, and the update is swapped in at the # next reboot. If an update is interrupted or otherwise fails, the OverlayFS # upper layer and metadata can be discarded before trying again. # # The swap on boot is triggered by launch_chffrplus.sh # gated on the existence of $FINALIZED/.overlay_consistent and also the # existence and mtime of $BASEDIR/.overlay_init. # # Other than build byproducts, BASEDIR should not be modified while this # service is running. Developers modifying code directly in BASEDIR should # disable this service. import os import datetime import subprocess import psutil import shutil import signal import fcntl import time import threading from pathlib import Path from typing import List, Tuple, Optional from common.basedir import BASEDIR from common.params import Params from selfdrive.hardware import EON, TICI, HARDWARE from selfdrive.swaglog import cloudlog from selfdrive.controls.lib.alertmanager import set_offroad_alert from selfdrive.hardware.tici.agnos import flash_agnos_update LOCK_FILE = os.getenv("UPDATER_LOCK_FILE", "/tmp/safe_staging_overlay.lock") STAGING_ROOT = os.getenv("UPDATER_STAGING_ROOT", "/data/safe_staging") NEOSUPDATE_DIR = os.getenv("UPDATER_NEOSUPDATE_DIR", "/data/neoupdate") OVERLAY_UPPER = os.path.join(STAGING_ROOT, "upper") OVERLAY_METADATA = os.path.join(STAGING_ROOT, "metadata") OVERLAY_MERGED = os.path.join(STAGING_ROOT, "merged") FINALIZED = os.path.join(STAGING_ROOT, "finalized") class WaitTimeHelper: def __init__(self, proc): self.proc = proc self.ready_event = threading.Event() self.shutdown = False signal.signal(signal.SIGTERM, self.graceful_shutdown) signal.signal(signal.SIGINT, self.graceful_shutdown) signal.signal(signal.SIGHUP, self.update_now) def graceful_shutdown(self, signum: int, frame) -> None: # umount -f doesn't appear effective in avoiding "device busy" on NEOS, # so don't actually die until the next convenient opportunity in main(). 
cloudlog.info("caught SIGINT/SIGTERM, dismounting overlay at next opportunity") # forward the signal to all our child processes child_procs = self.proc.children(recursive=True) for p in child_procs: p.send_signal(signum) self.shutdown = True self.ready_event.set() def update_now(self, signum: int, frame) -> None: cloudlog.info("caught SIGHUP, running update check immediately") self.ready_event.set() def sleep(self, t: float) -> None: self.ready_event.wait(timeout=t) def run(cmd: List[str], cwd: Optional[str] = None, low_priority: bool = False): if low_priority: cmd = ["nice", "-n", "19"] + cmd return subprocess.check_output(cmd, cwd=cwd, stderr=subprocess.STDOUT, encoding='utf8') def set_consistent_flag(consistent: bool) -> None: os.sync() consistent_file = Path(os.path.join(FINALIZED, ".overlay_consistent")) if consistent: consistent_file.touch() elif not consistent and consistent_file.exists(): consistent_file.unlink() os.sync() def set_params(new_version: bool, failed_count: int, exception: Optional[str]) -> None: params = Params() params.put("UpdateFailedCount", str(failed_count)) if failed_count == 0: t = datetime.datetime.utcnow().isoformat() params.put("LastUpdateTime", t.encode('utf8')) if exception is None: params.delete("LastUpdateException") else: params.put("LastUpdateException", exception) if new_version: try: with open(os.path.join(FINALIZED, "RELEASES.md"), "rb") as f: r = f.read() r = r[:r.find(b'\n\n')] # Slice latest release notes params.put("ReleaseNotes", r + b"\n") except Exception: params.put("ReleaseNotes", "") params.put("UpdateAvailable", "1") def setup_git_options(cwd: str) -> None: # We sync FS object atimes (which NEOS doesn't use) and mtimes, but ctimes # are outside user control. Make sure Git is set up to ignore system ctimes, # because they change when we make hard links during finalize. Otherwise, # there is a lot of unnecessary churn. This appears to be a common need on # OSX as well: https://www.git-tower.com/blog/make-git-rebase-safe-on-osx/ # We are using copytree to copy the directory, which also changes # inode numbers. Ignore those changes too. git_cfg = [ ("core.trustctime", "false"), ("core.checkStat", "minimal"), ] for option, value in git_cfg: run(["git", "config", option, value], cwd) def dismount_overlay() -> None: if os.path.ismount(OVERLAY_MERGED): cloudlog.info("unmounting existing overlay") args = ["umount", "-l", OVERLAY_MERGED] if TICI: args = ["sudo"] + args run(args) def init_overlay() -> None: overlay_init_file = Path(os.path.join(BASEDIR, ".overlay_init")) # Re-create the overlay if BASEDIR/.git has changed since we created the overlay if overlay_init_file.is_file(): git_dir_path = os.path.join(BASEDIR, ".git") new_files = run(["find", git_dir_path, "-newer", str(overlay_init_file)]) if not len(new_files.splitlines()): # A valid overlay already exists return else: cloudlog.info(".git directory changed, recreating overlay") cloudlog.info("preparing new safe staging area") params = Params() params.put("UpdateAvailable", "0") set_consistent_flag(False) dismount_overlay() if os.path.isdir(STAGING_ROOT): shutil.rmtree(STAGING_ROOT) for dirname in [STAGING_ROOT, OVERLAY_UPPER, OVERLAY_METADATA, OVERLAY_MERGED]: os.mkdir(dirname, 0o755) if os.lstat(BASEDIR).st_dev != os.lstat(OVERLAY_MERGED).st_dev: raise RuntimeError("base and overlay merge directories are on different filesystems; not valid for overlay FS!") # Leave a timestamped canary in BASEDIR to check at startup. The device clock # should be correct by the time we get here. 
If the init file disappears, or # critical mtimes in BASEDIR are newer than .overlay_init, continue.sh can # assume that BASEDIR has used for local development or otherwise modified, # and skips the update activation attempt. consistent_file = Path(os.path.join(BASEDIR, ".overlay_consistent")) if consistent_file.is_file(): consistent_file.unlink() overlay_init_file.touch() os.sync() overlay_opts = f"lowerdir={BASEDIR},upperdir={OVERLAY_UPPER},workdir={OVERLAY_METADATA}" mount_cmd = ["mount", "-t", "overlay", "-o", overlay_opts, "none", OVERLAY_MERGED] if TICI: run(["sudo"] + mount_cmd) run(["sudo", "chmod", "755", os.path.join(OVERLAY_METADATA, "work")]) else: run(mount_cmd) git_diff = run(["git", "diff"], OVERLAY_MERGED, low_priority=True) params.put("GitDiff", git_diff) cloudlog.info(f"git diff output:\n{git_diff}") def finalize_update() -> None: """Take the current OverlayFS merged view and finalize a copy outside of OverlayFS, ready to be swapped-in at BASEDIR. Copy using shutil.copytree""" # Remove the update ready flag and any old updates cloudlog.info("creating finalized version of the overlay") set_consistent_flag(False) # Copy the merged overlay view and set the update ready flag if os.path.exists(FINALIZED): shutil.rmtree(FINALIZED) shutil.copytree(OVERLAY_MERGED, FINALIZED, symlinks=True) set_consistent_flag(True) cloudlog.info("done finalizing overlay") def handle_agnos_update(wait_helper): cur_version = HARDWARE.get_os_version() updated_version = run(["bash", "-c", r"unset AGNOS_VERSION && source launch_env.sh && \ echo -n $AGNOS_VERSION"], OVERLAY_MERGED).strip() cloudlog.info(f"AGNOS version check: {cur_version} vs {updated_version}") if cur_version == updated_version: return # prevent an openpilot getting swapped in with a mismatched or partially downloaded agnos set_consistent_flag(False) cloudlog.info(f"Beginning background installation for AGNOS {updated_version}") set_offroad_alert("Offroad_NeosUpdate", True) manifest_path = os.path.join(OVERLAY_MERGED, "selfdrive/hardware/tici/agnos.json") flash_agnos_update(manifest_path, cloudlog) set_offroad_alert("Offroad_NeosUpdate", False) def handle_neos_update(wait_helper: WaitTimeHelper) -> None: cur_neos = HARDWARE.get_os_version() updated_neos = run(["bash", "-c", r"unset REQUIRED_NEOS_VERSION && source launch_env.sh && \ echo -n $REQUIRED_NEOS_VERSION"], OVERLAY_MERGED).strip() cloudlog.info(f"NEOS version check: {cur_neos} vs {updated_neos}") if cur_neos == updated_neos: return cloudlog.info(f"Beginning background download for NEOS {updated_neos}") set_offroad_alert("Offroad_NeosUpdate", True) updater_path = os.path.join(OVERLAY_MERGED, "installer/updater/updater") update_manifest = f"file://{OVERLAY_MERGED}/installer/updater/update.json" neos_downloaded = False start_time = time.monotonic() # Try to download for one day while not neos_downloaded and not wait_helper.shutdown and \ (time.monotonic() - start_time < 60*60*24): wait_helper.ready_event.clear() try: run([updater_path, "bgcache", update_manifest], OVERLAY_MERGED, low_priority=True) neos_downloaded = True except subprocess.CalledProcessError: cloudlog.info("NEOS background download failed, retrying") wait_helper.sleep(120) # If the download failed, we'll show the alert again when we retry set_offroad_alert("Offroad_NeosUpdate", False) if not neos_downloaded: raise Exception("Failed to download NEOS update") cloudlog.info(f"NEOS background download successful, took {time.monotonic() - start_time} seconds") def check_git_fetch_result(fetch_txt): err_msg = "Failed to 
add the host to the list of known hosts (/data/data/com.termux/files/home/.ssh/known_hosts).\n" return len(fetch_txt) > 0 and (fetch_txt != err_msg) def check_for_update() -> Tuple[bool, bool]: setup_git_options(OVERLAY_MERGED) try: git_fetch_output = run(["git", "fetch", "--dry-run"], OVERLAY_MERGED, low_priority=True) return True, check_git_fetch_result(git_fetch_output) except subprocess.CalledProcessError: return False, False def fetch_update(wait_helper: WaitTimeHelper) -> bool: cloudlog.info("attempting git fetch inside staging overlay") setup_git_options(OVERLAY_MERGED) git_fetch_output = run(["git", "fetch"], OVERLAY_MERGED, low_priority=True) cloudlog.info("git fetch success: %s", git_fetch_output) cur_hash = run(["git", "rev-parse", "HEAD"], OVERLAY_MERGED).rstrip() upstream_hash = run(["git", "rev-parse", "@{u}"], OVERLAY_MERGED).rstrip() new_version = cur_hash != upstream_hash git_fetch_result = check_git_fetch_result(git_fetch_output) cloudlog.info("comparing %s to %s" % (cur_hash, upstream_hash)) if new_version or git_fetch_result: cloudlog.info("Running update") if new_version: cloudlog.info("git reset in progress") r = [ run(["git", "reset", "--hard", "@{u}"], OVERLAY_MERGED, low_priority=True), run(["git", "clean", "-xdf"], OVERLAY_MERGED, low_priority=True ), run(["git", "submodule", "init"], OVERLAY_MERGED, low_priority=True), run(["git", "submodule", "update"], OVERLAY_MERGED, low_priority=True), ] cloudlog.info("git reset success: %s", '\n'.join(r)) if EON: handle_neos_update(wait_helper) elif TICI: handle_agnos_update(wait_helper) # Create the finalized, ready-to-swap update finalize_update() cloudlog.info("openpilot update successful!") else: cloudlog.info("nothing new from git at this time") return new_version def main(): params = Params() if params.get("DisableUpdates") == b"1": raise RuntimeError("updates are disabled by the DisableUpdates param") if EON and os.geteuid() != 0: raise RuntimeError("updated must be launched as root!") # Set low io priority proc = psutil.Process() if psutil.LINUX: proc.ionice(psutil.IOPRIO_CLASS_BE, value=7) ov_lock_fd = open(LOCK_FILE, 'w') try: fcntl.flock(ov_lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB) except IOError as e: raise RuntimeError("couldn't get overlay lock; is another updated running?") from e # Wait for IsOffroad to be set before our first update attempt wait_helper = WaitTimeHelper(proc) wait_helper.sleep(30) overlay_init = Path(os.path.join(BASEDIR, ".overlay_init")) if overlay_init.exists(): overlay_init.unlink() first_run = True last_fetch_time = 0 update_failed_count = 0 # Run the update loop # * every 1m, do a lightweight internet/update check # * every 10m, do a full git fetch while not wait_helper.shutdown: update_now = wait_helper.ready_event.is_set() wait_helper.ready_event.clear() # Don't run updater while onroad or if the time's wrong time_wrong = datetime.datetime.utcnow().year < 2019 is_onroad = params.get("IsOffroad") != b"1" if is_onroad or time_wrong: wait_helper.sleep(30) cloudlog.info("not running updater, not offroad") continue # Attempt an update exception = None new_version = False update_failed_count += 1 try: init_overlay() internet_ok, update_available = check_for_update() if internet_ok and not update_available: update_failed_count = 0 # Fetch updates at most every 10 minutes if internet_ok and (update_now or time.monotonic() - last_fetch_time > 60*10): new_version = fetch_update(wait_helper) update_failed_count = 0 last_fetch_time = time.monotonic() if first_run and not new_version and 
os.path.isdir(NEOSUPDATE_DIR): shutil.rmtree(NEOSUPDATE_DIR) first_run = False except subprocess.CalledProcessError as e: cloudlog.event( "update process failed", cmd=e.cmd, output=e.output, returncode=e.returncode ) exception = f"command failed: {e.cmd}\n{e.output}" except Exception as e: cloudlog.exception("uncaught updated exception, shouldn't happen") exception = str(e) set_params(new_version, update_failed_count, exception) wait_helper.sleep(60) dismount_overlay() if __name__ == "__main__": main()
[]
[]
[ "UPDATER_NEOSUPDATE_DIR", "UPDATER_STAGING_ROOT", "UPDATER_LOCK_FILE" ]
[]
["UPDATER_NEOSUPDATE_DIR", "UPDATER_STAGING_ROOT", "UPDATER_LOCK_FILE"]
python
3
0
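For each row above, the bracketed name lists that follow the source appear to correspond to the constant string arguments handed to the environment lookup calls in that source (for this row, the UPDATER_* names passed to os.getenv). A rough, hypothetical sketch of pulling such names out of a source blob with a regular expression follows; the extractEnvNames helper and the pattern are illustrative assumptions, not the tooling that actually produced these rows.

package main

import (
    "fmt"
    "regexp"
    "sort"
)

// envCallRe is a heuristic: it only picks up double-quoted literals used
// directly as the first argument of os.getenv / os.environ.get (Python)
// or os.Getenv (Go).
var envCallRe = regexp.MustCompile(`os\.(?:getenv|environ\.get|Getenv)\(\s*"([^"]+)"`)

// extractEnvNames returns the sorted, de-duplicated environment variable
// names referenced as string constants in src.
func extractEnvNames(src string) []string {
    seen := map[string]struct{}{}
    for _, m := range envCallRe.FindAllStringSubmatch(src, -1) {
        seen[m[1]] = struct{}{}
    }
    names := make([]string, 0, len(seen))
    for n := range seen {
        names = append(names, n)
    }
    sort.Strings(names)
    return names
}

func main() {
    src := `LOCK_FILE = os.getenv("UPDATER_LOCK_FILE", "/tmp/safe_staging_overlay.lock")
STAGING_ROOT = os.getenv("UPDATER_STAGING_ROOT", "/data/safe_staging")`
    fmt.Println(extractEnvNames(src)) // [UPDATER_LOCK_FILE UPDATER_STAGING_ROOT]
}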
svc/cmd/earbug/server.go
package main

import (
    "context"
    "flag"
    "net/http"
    "os"
    "strings"
    "sync"
    "time"

    "github.com/go-logr/logr"
    spotifyauth "github.com/zmb3/spotify/v2/auth"
    clientv3 "go.etcd.io/etcd/client/v3"
    "go.opentelemetry.io/otel/metric"
    "go.opentelemetry.io/otel/trace"
)

func New(flags *flag.FlagSet) *Server {
    var s Server
    s.pollWorkerMap = make(map[string]struct{})
    flag.StringVar(&s.CanonicalURL, "url", "https://earbug.seankhliao.com", "url app is hosted on")
    flag.StringVar(&s.StoreURL, "store", "http://etcd-0.etcd:2379", "etcd url")
    flag.StringVar(&s.StorePrefix, "store-prefix", "earbug", "key prefix in etcd")
    flag.DurationVar(&s.PollInterval, "poll-interval", 5*time.Minute, "time between spotify polls")
    return &s
}

type Server struct {
    CanonicalURL string
    StoreURL     string
    StorePrefix  string
    PollInterval time.Duration

    l logr.Logger
    t trace.Tracer

    Auth  *spotifyauth.Authenticator
    Store *clientv3.Client

    pollWorkerShutdown chan struct{}
    pollWorkerWg       sync.WaitGroup
    pollWorkerMap      map[string]struct{}
    pollWorkerMu       sync.Mutex
}

func (s *Server) RegisterHTTP(ctx context.Context, mux *http.ServeMux, l logr.Logger, m metric.MeterProvider, t trace.TracerProvider, shutdown func()) error {
    s.l = l.WithName("earbug")
    s.t = t.Tracer("earbug")

    s.Auth = spotifyauth.New(
        spotifyauth.WithRedirectURL(s.CanonicalURL+"/auth/callback"),
        spotifyauth.WithScopes(
            spotifyauth.ScopeUserReadRecentlyPlayed,
        ),
        spotifyauth.WithClientID(strings.TrimSpace(os.Getenv("SPOTIFY_ID"))),
        spotifyauth.WithClientSecret(strings.TrimSpace(os.Getenv("SPOTIFY_SECRET"))),
    )

    s.pollWorkerShutdown = make(chan struct{})

    var err error
    s.Store, err = clientv3.NewFromURL(s.StoreURL)
    if err != nil {
        return err
    }

    err = s.startStoredPoll(ctx)
    if err != nil {
        return err
    }

    mux.HandleFunc("/auth/callback", s.handleAuthCallback)
    mux.HandleFunc("/user/history", s.handleUserHistory)
    mux.HandleFunc("/", s.handleIndex)
    return nil
}
[ "\"SPOTIFY_ID\"", "\"SPOTIFY_SECRET\"" ]
[]
[ "SPOTIFY_ID", "SPOTIFY_SECRET" ]
[]
["SPOTIFY_ID", "SPOTIFY_SECRET"]
go
2
0
go/vt/vttest/environment.go
/* Copyright 2019 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package vttest import ( "fmt" "io/ioutil" "math/rand" "os" "path" "strings" "time" // we use gRPC everywhere, so import the vtgate client. _ "vitess.io/vitess/go/vt/vtgate/grpcvtgateconn" ) // Environment is the interface that customizes the global settings for // the test cluster. Usually the same environment settings are shared by // all the LocalCluster instances in a given test suite, with each instance // receiving a different Config for specific tests. // For Environments that create temporary data on-disk and clean it up on // termination, a brand new instance of Environment should be passed to // each LocalCluster. type Environment interface { // BinaryPath returns the full path to the given executable BinaryPath(bin string) string // MySQLManager is the constructor for the MySQL manager that will // be used by the cluster. The manager must take care of initializing // and destructing the MySQL instance(s) that will be used by the cluster. // See: vttest.MySQLManager for the interface the manager must implement MySQLManager(mycnf []string, snapshot string) (MySQLManager, error) // Directory is the path where the local cluster will store all its // data and metadata. For local testing, this should probably be an // unique temporary directory. Directory() string // LogDirectory is the directory where logs for all services in the // cluster will be stored. LogDirectory() string // VtcomoboArguments are the extra commandline arguments that will be // passed to `vtcombo` VtcomboArguments() []string // ProcessHealthCheck returns a HealthChecker for the given service. // The HealthChecker takes an address and attempts to check whether // the service is up and healthy. // If a given service does not require any custom health checks, // nil can be returned. ProcessHealthCheck(name string) HealthChecker // DefaultProtocol is the protocol used to communicate with the // Vitess cluster. This is usually "grpc". DefaultProtocol() string // PortForProtocol returns the listening port for a given service // on the given protocol. If protocol is empty, the default protocol // for each service is assumed. PortForProtocol(name, protocol string) int // EnvVars returns the environment variables that will be passed // to all Vitess processes spawned by the local cluster. These variables // always take precedence over the variables inherited from the current // process. EnvVars() []string // TearDown is called during LocalCluster.TearDown() to cleanup // any temporary data in the environment. Environments that can // last through several test runs do not need to implement it. 
TearDown() error } // LocalTestEnv is an Environment implementation for local testing // See: NewLocalTestEnv() type LocalTestEnv struct { BasePort int TmpPath string DefaultMyCnf []string Env []string } // DefaultMySQLFlavor is the MySQL flavor used by vttest when MYSQL_FLAVOR is not // set in the environment const DefaultMySQLFlavor = "MySQL56" // GetMySQLOptions returns the default option set for the given MySQL // flavor. If flavor is not set, the value from the `MYSQL_FLAVOR` env // variable is used, and if this is not set, DefaultMySQLFlavor will // be used. // Returns the name of the MySQL flavor being used, the set of MySQL CNF // files specific to this flavor, and any errors. func GetMySQLOptions(flavor string) (string, []string, error) { if flavor == "" { flavor = os.Getenv("MYSQL_FLAVOR") } if flavor == "" { flavor = DefaultMySQLFlavor } mycnf := []string{} mycnf = append(mycnf, "config/mycnf/default-fast.cnf") for i, cnf := range mycnf { mycnf[i] = path.Join(os.Getenv("VTROOT"), cnf) } return flavor, mycnf, nil } // EnvVars implements EnvVars for LocalTestEnv func (env *LocalTestEnv) EnvVars() []string { return env.Env } // BinaryPath implements BinaryPath for LocalTestEnv func (env *LocalTestEnv) BinaryPath(binary string) string { return path.Join(os.Getenv("VTROOT"), "bin", binary) } // MySQLManager implements MySQLManager for LocalTestEnv func (env *LocalTestEnv) MySQLManager(mycnf []string, snapshot string) (MySQLManager, error) { return &Mysqlctl{ Binary: env.BinaryPath("mysqlctl"), InitFile: path.Join(os.Getenv("VTROOT"), "config/init_db.sql"), Directory: env.TmpPath, Port: env.PortForProtocol("mysql", ""), MyCnf: append(env.DefaultMyCnf, mycnf...), Env: env.EnvVars(), UID: 1, }, nil } // DefaultProtocol implements DefaultProtocol for LocalTestEnv. // It is always GRPC. func (env *LocalTestEnv) DefaultProtocol() string { return "grpc" } // PortForProtocol implements PortForProtocol for LocalTestEnv. func (env *LocalTestEnv) PortForProtocol(name, proto string) int { switch name { case "vtcombo": if proto == "grpc" { return env.BasePort + 1 } return env.BasePort case "mysql": return env.BasePort + 2 case "vtcombo_mysql_port": return env.BasePort + 3 default: panic("unknown service name: " + name) } } // ProcessHealthCheck implements ProcessHealthCheck for LocalTestEnv. // By default, it performs no service-specific health checks func (env *LocalTestEnv) ProcessHealthCheck(name string) HealthChecker { return nil } // VtcomboArguments implements VtcomboArguments for LocalTestEnv. func (env *LocalTestEnv) VtcomboArguments() []string { return []string{ "-service_map", strings.Join( []string{"grpc-vtgateservice", "grpc-vtctl", "grpc-vtctld"}, ",", ), "-enable_queries", } } // LogDirectory implements LogDirectory for LocalTestEnv. func (env *LocalTestEnv) LogDirectory() string { return path.Join(env.TmpPath, "logs") } // Directory implements Directory for LocalTestEnv. func (env *LocalTestEnv) Directory() string { return env.TmpPath } // TearDown implements TearDown for LocalTestEnv func (env *LocalTestEnv) TearDown() error { return os.RemoveAll(env.TmpPath) } func tmpdir(dataroot string) (dir string, err error) { dir, err = ioutil.TempDir(dataroot, "vttest") return } func randomPort() int { v := rand.Int31n(20000) return int(v + 10000) } // NewLocalTestEnv returns an instance of the default test environment used // for local testing Vitess. 
The defaults are as follows: // - Directory() is a random temporary directory in VTDATAROOT, which is cleaned // up when closing the Environment. // - LogDirectory() is the `logs` subdir inside Directory() // - The MySQL flavor is set to `flavor`. If the argument is not set, it will // default to the value of MYSQL_FLAVOR, and if this variable is not set, to // DefaultMySQLFlavor // - PortForProtocol() will return ports based off the given basePort. If basePort // is zero, a random port between 10000 and 20000 will be chosen. // - DefaultProtocol() is always "grpc" // - ProcessHealthCheck() performs no service-specific health checks // - BinaryPath() will look up the default Vitess binaries in VTROOT // - MySQLManager() will return a vttest.Mysqlctl instance, configured with the // given MySQL flavor. This will use the `mysqlctl` command to initialize and // teardown a single mysqld instance. func NewLocalTestEnv(flavor string, basePort int) (*LocalTestEnv, error) { directory, err := tmpdir(os.Getenv("VTDATAROOT")) if err != nil { return nil, err } return NewLocalTestEnvWithDirectory(flavor, basePort, directory) } // NewLocalTestEnvWithDirectory returns a new instance of the default test // environment with a directory explicitly specified. func NewLocalTestEnvWithDirectory(flavor string, basePort int, directory string) (*LocalTestEnv, error) { if _, err := os.Stat(path.Join(directory, "logs")); os.IsNotExist(err) { err := os.Mkdir(path.Join(directory, "logs"), 0700) if err != nil { return nil, err } } flavor, mycnf, err := GetMySQLOptions(flavor) if err != nil { return nil, err } if basePort == 0 { basePort = randomPort() } return &LocalTestEnv{ BasePort: basePort, TmpPath: directory, DefaultMyCnf: mycnf, Env: []string{ fmt.Sprintf("VTDATAROOT=%s", directory), fmt.Sprintf("MYSQL_FLAVOR=%s", flavor), }, }, nil } func defaultEnvFactory() (Environment, error) { return NewLocalTestEnv("", 0) } func init() { rand.Seed(time.Now().UnixNano()) } // NewDefaultEnv is an user-configurable callback that returns a new Environment // instance with the default settings. // This callback is only used in cases where the user hasn't explicitly set // the Env variable when initializing a LocalCluster var NewDefaultEnv = defaultEnvFactory
[ "\"MYSQL_FLAVOR\"", "\"VTROOT\"", "\"VTROOT\"", "\"VTROOT\"", "\"VTDATAROOT\"" ]
[]
[ "MYSQL_FLAVOR", "VTDATAROOT", "VTROOT" ]
[]
["MYSQL_FLAVOR", "VTDATAROOT", "VTROOT"]
go
3
0
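PortForProtocol in the row above lays every service out at a fixed offset from a single BasePort. A short usage sketch of that mapping, assuming the Vitess module is available on the import path (the 15000 base port is an arbitrary example value):

package main

import (
    "fmt"

    "vitess.io/vitess/go/vt/vttest"
)

func main() {
    // NewLocalTestEnv would pick a random base port if 0 were passed;
    // a fixed one here makes the per-service offsets visible.
    env := &vttest.LocalTestEnv{BasePort: 15000}

    fmt.Println(env.PortForProtocol("vtcombo", ""))            // 15000
    fmt.Println(env.PortForProtocol("vtcombo", "grpc"))        // 15001
    fmt.Println(env.PortForProtocol("mysql", ""))              // 15002
    fmt.Println(env.PortForProtocol("vtcombo_mysql_port", "")) // 15003
}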
examples/http-server/main.go
// Copyright 2017 Joshua J Baker. All rights reserved. // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. package main import ( "bytes" "flag" "fmt" "log" "os" "strconv" "strings" "time" "github.com/oscarmherrera/evio" ) var res string type request struct { proto, method string path, query string head, body string remoteAddr string } func main() { var port int var loops int var aaaa bool var noparse bool var unixsocket string var stdlib bool flag.StringVar(&unixsocket, "unixsocket", "", "unix socket") flag.IntVar(&port, "port", 8080, "server port") flag.BoolVar(&aaaa, "aaaa", false, "aaaaa....") flag.BoolVar(&noparse, "noparse", true, "do not parse requests") flag.BoolVar(&stdlib, "stdlib", false, "use stdlib") flag.IntVar(&loops, "loops", 0, "num loops") flag.Parse() if os.Getenv("NOPARSE") == "1" { noparse = true } if aaaa { res = strings.Repeat("a", 1024) } else { res = "Hello World!\r\n" } var events evio.Events events.NumLoops = loops events.Serving = func(srv evio.Server) (action evio.Action) { log.Printf("http server started on port %d (loops: %d)", port, srv.NumLoops) if unixsocket != "" { log.Printf("http server started at %s", unixsocket) } if stdlib { log.Printf("stdlib") } return } events.Opened = func(c evio.Conn) (out []byte, opts evio.Options, action evio.Action) { c.SetContext(&evio.InputStream{}) //log.Printf("opened: laddr: %v: raddr: %v", c.LocalAddr(), c.RemoteAddr()) return } events.Closed = func(c evio.Conn, err error) (action evio.Action) { //log.Printf("closed: %s: %s", c.LocalAddr().String(), c.RemoteAddr().String()) return } events.Data = func(c evio.Conn, in []byte) (out []byte, action evio.Action) { if in == nil { return } is := c.Context().(*evio.InputStream) data := is.Begin(in) if noparse && bytes.Contains(data, []byte("\r\n\r\n")) { // for testing minimal single packet request -> response. out = appendresp(nil, "200 OK", "", res) return } // process the pipeline var req request for { leftover, err := parsereq(data, &req) if err != nil { // bad thing happened out = appendresp(out, "500 Error", "", err.Error()+"\n") action = evio.Close break } else if len(leftover) == len(data) { // request not ready, yet break } // handle the request req.remoteAddr = c.RemoteAddr().String() out = appendhandle(out, &req) data = leftover } is.End(data) return } var ssuf string if stdlib { ssuf = "-net" } // We at least want the single http address. addrs := []string{fmt.Sprintf("tcp"+ssuf+"://:%d", port)} if unixsocket != "" { addrs = append(addrs, fmt.Sprintf("unix"+ssuf+"://%s", unixsocket)) } // Start serving! log.Fatal(evio.Serve(events, addrs...)) } // appendhandle handles the incoming request and appends the response to // the provided bytes, which is then returned to the caller. func appendhandle(b []byte, req *request) []byte { return appendresp(b, "200 OK", "", res) } // appendresp will append a valid http response to the provide bytes. // The status param should be the code plus text such as "200 OK". // The head parameter should be a series of lines ending with "\r\n" or empty. func appendresp(b []byte, status, head, body string) []byte { b = append(b, "HTTP/1.1"...) b = append(b, ' ') b = append(b, status...) b = append(b, '\r', '\n') b = append(b, "Server: evio\r\n"...) b = append(b, "Date: "...) b = time.Now().AppendFormat(b, "Mon, 02 Jan 2006 15:04:05 GMT") b = append(b, '\r', '\n') if len(body) > 0 { b = append(b, "Content-Length: "...) 
b = strconv.AppendInt(b, int64(len(body)), 10) b = append(b, '\r', '\n') } b = append(b, head...) b = append(b, '\r', '\n') if len(body) > 0 { b = append(b, body...) } return b } // parsereq is a very simple http request parser. This operation // waits for the entire payload to be buffered before returning a // valid request. func parsereq(data []byte, req *request) (leftover []byte, err error) { sdata := string(data) var i, s int var top string var clen int var q = -1 // method, path, proto line for ; i < len(sdata); i++ { if sdata[i] == ' ' { req.method = sdata[s:i] for i, s = i+1, i+1; i < len(sdata); i++ { if sdata[i] == '?' && q == -1 { q = i - s } else if sdata[i] == ' ' { if q != -1 { req.path = sdata[s:q] req.query = req.path[q+1 : i] } else { req.path = sdata[s:i] } for i, s = i+1, i+1; i < len(sdata); i++ { if sdata[i] == '\n' && sdata[i-1] == '\r' { req.proto = sdata[s:i] i, s = i+1, i+1 break } } break } } break } } if req.proto == "" { return data, fmt.Errorf("malformed request") } top = sdata[:s] for ; i < len(sdata); i++ { if i > 1 && sdata[i] == '\n' && sdata[i-1] == '\r' { line := sdata[s : i-1] s = i + 1 if line == "" { req.head = sdata[len(top)+2 : i+1] i++ if clen > 0 { if len(sdata[i:]) < clen { break } req.body = sdata[i : i+clen] i += clen } return data[i:], nil } if strings.HasPrefix(line, "Content-Length:") { n, err := strconv.ParseInt(strings.TrimSpace(line[len("Content-Length:"):]), 10, 64) if err == nil { clen = int(n) } } } } // not enough data return data, nil }
[ "\"NOPARSE\"" ]
[]
[ "NOPARSE" ]
[]
["NOPARSE"]
go
1
0
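appendresp in the row above assembles the HTTP/1.1 wire format by hand instead of going through net/http: status line, Server and Date headers, an optional Content-Length, a blank line, then the body. A small standalone sketch of that same byte layout (buildResponse is an illustrative stand-in, not the evio example's own helper):

package main

import (
    "fmt"
    "time"
)

// buildResponse mirrors the layout appendresp produces: status line,
// fixed headers, optional Content-Length, then CRLF CRLF before the body.
func buildResponse(status, body string) []byte {
    b := []byte("HTTP/1.1 " + status + "\r\n")
    b = append(b, "Server: evio\r\n"...)
    b = append(b, "Date: "...)
    b = time.Now().AppendFormat(b, "Mon, 02 Jan 2006 15:04:05 GMT")
    b = append(b, '\r', '\n')
    if len(body) > 0 {
        b = append(b, fmt.Sprintf("Content-Length: %d\r\n", len(body))...)
    }
    b = append(b, '\r', '\n') // blank line: end of headers
    return append(b, body...)
}

func main() {
    fmt.Printf("%q\n", buildResponse("200 OK", "Hello World!\r\n"))
}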
vendor/src/github.com/bugsnag/panicwrap/panicwrap_test.go
package panicwrap import ( "bytes" "fmt" "os" "os/exec" "strings" "testing" "time" ) func helperProcess(s ...string) *exec.Cmd { cs := []string{"-test.run=TestHelperProcess", "--"} cs = append(cs, s...) env := []string{ "GO_WANT_HELPER_PROCESS=1", } cmd := exec.Command(os.Args[0], cs...) cmd.Env = append(env, os.Environ()...) cmd.Stdin = os.Stdin cmd.Stderr = os.Stderr cmd.Stdout = os.Stdout return cmd } // This is executed by `helperProcess` in a separate process in order to // provider a proper sub-process environment to test some of our functionality. func TestHelperProcess(*testing.T) { if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { return } // Find the arguments to our helper, which are the arguments past // the "--" in the command line. args := os.Args for len(args) > 0 { if args[0] == "--" { args = args[1:] break } args = args[1:] } if len(args) == 0 { fmt.Fprintf(os.Stderr, "No command\n") os.Exit(2) } panicHandler := func(s string) { fmt.Fprintf(os.Stdout, "wrapped: %d", len(s)) os.Exit(0) } cmd, args := args[0], args[1:] switch cmd { case "no-panic-ordered-output": exitStatus, err := BasicWrap(panicHandler) if err != nil { fmt.Fprintf(os.Stderr, "wrap error: %s", err) os.Exit(1) } if exitStatus < 0 { for i := 0; i < 1000; i++ { os.Stdout.Write([]byte("a")) os.Stderr.Write([]byte("b")) } os.Exit(0) } os.Exit(exitStatus) case "no-panic-output": fmt.Fprint(os.Stdout, "i am output") fmt.Fprint(os.Stderr, "stderr out") os.Exit(0) case "panic-boundary": exitStatus, err := BasicWrap(panicHandler) if err != nil { fmt.Fprintf(os.Stderr, "wrap error: %s", err) os.Exit(1) } if exitStatus < 0 { // Simulate a panic but on two boundaries... fmt.Fprint(os.Stderr, "pan") os.Stderr.Sync() time.Sleep(100 * time.Millisecond) fmt.Fprint(os.Stderr, "ic: oh crap") os.Exit(2) } os.Exit(exitStatus) case "panic-long": exitStatus, err := BasicWrap(panicHandler) if err != nil { fmt.Fprintf(os.Stderr, "wrap error: %s", err) os.Exit(1) } if exitStatus < 0 { // Make a fake panic by faking the header and adding a // bunch of garbage. 
fmt.Fprint(os.Stderr, "panic: foo\n\n") for i := 0; i < 1024; i++ { fmt.Fprint(os.Stderr, "foobarbaz") } // Sleep so that it dumps the previous data //time.Sleep(1 * time.Millisecond) time.Sleep(500 * time.Millisecond) // Make a real panic panic("I AM REAL!") } os.Exit(exitStatus) case "panic": hidePanic := false if args[0] == "hide" { hidePanic = true } config := &WrapConfig{ Handler: panicHandler, HidePanic: hidePanic, } exitStatus, err := Wrap(config) if err != nil { fmt.Fprintf(os.Stderr, "wrap error: %s", err) os.Exit(1) } if exitStatus < 0 { panic("uh oh") } os.Exit(exitStatus) case "wrapped": child := false if len(args) > 0 && args[0] == "child" { child = true } config := &WrapConfig{ Handler: panicHandler, } exitStatus, err := Wrap(config) if err != nil { fmt.Fprintf(os.Stderr, "wrap error: %s", err) os.Exit(1) } if exitStatus < 0 { if child { fmt.Printf("%v", Wrapped(config)) } os.Exit(0) } if !child { fmt.Printf("%v", Wrapped(config)) } os.Exit(exitStatus) case "panic-monitor": config := &WrapConfig{ Handler: panicHandler, HidePanic: true, Monitor: true, } exitStatus, err := Wrap(config) if err != nil { fmt.Fprintf(os.Stderr, "wrap error: %s", err) os.Exit(1) } if exitStatus != -1 { fmt.Fprintf(os.Stderr, "wrap error: %s", err) os.Exit(1) } panic("uh oh") default: fmt.Fprintf(os.Stderr, "Unknown command: %q\n", cmd) os.Exit(2) } } func TestPanicWrap_Output(t *testing.T) { stderr := new(bytes.Buffer) stdout := new(bytes.Buffer) p := helperProcess("no-panic-output") p.Stdout = stdout p.Stderr = stderr if err := p.Run(); err != nil { t.Fatalf("err: %s", err) } if !strings.Contains(stdout.String(), "i am output") { t.Fatalf("didn't forward: %#v", stdout.String()) } if !strings.Contains(stderr.String(), "stderr out") { t.Fatalf("didn't forward: %#v", stderr.String()) } } /* TODO(mitchellh): This property would be nice to gain. 
func TestPanicWrap_Output_Order(t *testing.T) { output := new(bytes.Buffer) p := helperProcess("no-panic-ordered-output") p.Stdout = output p.Stderr = output if err := p.Run(); err != nil { t.Fatalf("err: %s", err) } expectedBuf := new(bytes.Buffer) for i := 0; i < 1000; i++ { expectedBuf.WriteString("ab") } actual := strings.TrimSpace(output.String()) expected := strings.TrimSpace(expectedBuf.String()) if actual != expected { t.Fatalf("bad: %#v", actual) } } */ func TestPanicWrap_panicHide(t *testing.T) { stdout := new(bytes.Buffer) stderr := new(bytes.Buffer) p := helperProcess("panic", "hide") p.Stdout = stdout p.Stderr = stderr if err := p.Run(); err != nil { t.Fatalf("err: %s", err) } if !strings.Contains(stdout.String(), "wrapped:") { t.Fatalf("didn't wrap: %#v", stdout.String()) } if strings.Contains(stderr.String(), "panic:") { t.Fatalf("shouldn't have panic: %#v", stderr.String()) } } func TestPanicWrap_panicShow(t *testing.T) { stdout := new(bytes.Buffer) stderr := new(bytes.Buffer) p := helperProcess("panic", "show") p.Stdout = stdout p.Stderr = stderr if err := p.Run(); err != nil { t.Fatalf("err: %s", err) } if !strings.Contains(stdout.String(), "wrapped:") { t.Fatalf("didn't wrap: %#v", stdout.String()) } if !strings.Contains(stderr.String(), "panic:") { t.Fatalf("should have panic: %#v", stderr.String()) } } func TestPanicWrap_panicLong(t *testing.T) { stdout := new(bytes.Buffer) p := helperProcess("panic-long") p.Stdout = stdout p.Stderr = new(bytes.Buffer) if err := p.Run(); err != nil { t.Fatalf("err: %s", err) } if !strings.Contains(stdout.String(), "wrapped:") { t.Fatalf("didn't wrap: %#v", stdout.String()) } } func TestPanicWrap_panicBoundary(t *testing.T) { // TODO(mitchellh): panics are currently lost on boundaries t.SkipNow() stdout := new(bytes.Buffer) p := helperProcess("panic-boundary") p.Stdout = stdout //p.Stderr = new(bytes.Buffer) if err := p.Run(); err != nil { t.Fatalf("err: %s", err) } if !strings.Contains(stdout.String(), "wrapped: 1015") { t.Fatalf("didn't wrap: %#v", stdout.String()) } } func TestPanicWrap_monitor(t *testing.T) { stdout := new(bytes.Buffer) p := helperProcess("panic-monitor") p.Stdout = stdout //p.Stderr = new(bytes.Buffer) if err := p.Run(); err == nil || err.Error() != "exit status 2" { t.Fatalf("err: %s", err) } if !strings.Contains(stdout.String(), "wrapped:") { t.Fatalf("didn't wrap: %#v", stdout.String()) } } func TestWrapped(t *testing.T) { stdout := new(bytes.Buffer) p := helperProcess("wrapped", "child") p.Stdout = stdout if err := p.Run(); err != nil { t.Fatalf("err: %s", err) } if !strings.Contains(stdout.String(), "true") { t.Fatalf("bad: %#v", stdout.String()) } } func TestWrapped_parent(t *testing.T) { stdout := new(bytes.Buffer) p := helperProcess("wrapped") p.Stdout = stdout if err := p.Run(); err != nil { t.Fatalf("err: %s", err) } if !strings.Contains(stdout.String(), "false") { t.Fatalf("bad: %#v", stdout.String()) } }
[ "\"GO_WANT_HELPER_PROCESS\"" ]
[]
[ "GO_WANT_HELPER_PROCESS" ]
[]
["GO_WANT_HELPER_PROCESS"]
go
1
0
src/manage.py
#!/usr/bin/env python
import os
import sys

if __name__ == '__main__':
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sonne.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
[]
[]
[]
[]
[]
python
0
0
classification/keras_embedlig_v3new.py
seedNum=10 import random, statistics random.seed(seedNum) import numpy numpy.random.seed(seedNum) import os os.environ["CUDA_VISIBLE_DEVICES"]="-1" os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' import tensorflow as tf tf.random.set_seed(seedNum) import sklearn, numpy, sys from sklearn import preprocessing, decomposition, cluster, model_selection import matplotlib.pyplot as plt #import keras from keras import optimizers, regularizers, utils from keras import backend as K from keras.layers import Input, Dense, Dropout, Add , Embedding, Concatenate, Flatten from keras.models import Model def customLoss(ytrue, ypred): print("\n\nXXX", ytrue, ypred, "YYYYY\n\n") print( dir(ytrue) ) print( ytrue._shape, type(ytrue) ) #print( help(ytrue) ) #for i in ytrue: # print("ONE I", i) #e = K.get_value(ytrue) #ytrue.eval(session=K.get_session()) #print( type(e), e) return K.sum(K.log(ytrue) - K.log(ypred)) def production(tab): autoencoder.fit(tab, tab, epochs=30, batch_size=20, shuffle=True) model_json = autoencoder.to_json() with open("model.json", "w") as json_file: json_file.write(model_json) # serialize weights to HDF5 autoencoder.save_weights("model.h5") print("Saved model to disk") def parseData(fn): tabOryg=numpy.loadtxt(fn, delimiter='\t', ) solv1 =tabOryg[:,0] base1 =tabOryg[:,1] ligand1=tabOryg[:,2] ligand2=tabOryg[:,3] temp = tabOryg[:,4] sbs1 = tabOryg[:, 5:5+512] sbs2 = tabOryg[:, 5+512:5+512+512] yld = tabOryg[:,-1] return {'solvents':[solv1,], 'bases':[base1, ], 'ligands':[ligand1, ligand2], 'temp':temp, 'sbses':[sbs1,sbs2], 'yield':yld } def makeModel(inputDim, wide1=90, wide2=10, embDim=3, solventClasses=1+6, baseClasses=1+7, ligandClasses=1+81, act1='relu', act2='relu', act3='elu' ): subs1 = Input(shape=(inputDim,)) subs2 = Input(shape=(inputDim,)) temper = Input(shape=(1,) ) sol1 = Input(shape=(1,) ) base_1 = Input(shape=(1,) ) lgand1 = Input(shape=(1,) ) lgand2 = Input(shape=(1,) ) solventEmbd = Embedding(solventClasses, embDim, input_length=1) #solventEmbd = Dense(2, activation='relu') solvent1 = solventEmbd(sol1) baseEmbd = Embedding(baseClasses, embDim, input_length=1) #baseEmbd = Dense(2, activation='relu') base1 = baseEmbd(base_1) ligandEmbd = Embedding(ligandClasses, embDim, input_length=1) #ligandEmbd = Dense(2, activation='relu') ligand1 = ligandEmbd(lgand1) ligand2 = ligandEmbd(lgand2) #solvent = Add()([solvent1, solvent2, solvent3, solvent4]) #base = Add()([base1, base2]) ligand = Add()([ligand1, ligand2]) conditions =Concatenate()([solvent1,base1, ligand]) conditions =Flatten()(conditions) conditions = Concatenate()([conditions, temper]) sbs1 = Dense(wide1, activation=act1)(subs1) sbs2 = Dense(wide1, activation=act1)(subs2) conditionsAndSubstrate = Concatenate() ([conditions, sbs1,sbs2]) hide9 = Dense(wide2, activation=act2)(conditionsAndSubstrate) hide9 = Dropout(0.05)(hide9) outyield = Dense(1, activation=act3)(hide9) model = Model((sol1,base_1,lgand1,lgand2, temper, subs1, subs2), outyield) optim = optimizers.Adam() # lr=0.0005) #( clipnorm=1, lr=0.01, amsgrad=True ) lr:=default:=0.001 model.compile(optimizer=optim, loss='mean_squared_error', metrics=["mean_absolute_error",]) #model.compile(optimizer=optim, loss='mean_squared_error', metrics=["mean_absolute_error", customLoss]) model.summary() return model def training(data, model, nfolds=5, epochs=30, klas1=6, klas2=7): kf5=model_selection.KFold(n_splits=nfolds) #initWeights = model.get_weights() randInit = tf.keras.initializers.RandomNormal() #X = preprocessing.scale(X) iniw = model.get_weights() initShapes = [ i.shape for i 
in iniw] eachFoldData=[] print("LEN", len(data['sbses'][0]), len(data['yield']) ) histories=[] for trainIdx, testIdx in kf5.split(data['sbses'][0]): # Model((solvent1,solvent2,solvent3,solvent4,base1,base2, ligand1,ligand2, temper, sbs1, sbs2), outyield) solvent1train= data['solvents'][0][trainIdx] solvent1test= data['solvents'][0][testIdx] base1train= data['bases'][0][trainIdx] base1test= data['bases'][0][testIdx] ligand1train= data['ligands'][0][trainIdx] ligand1test= data['ligands'][0][testIdx] ligand2train= data['ligands'][1][trainIdx] ligand2test= data['ligands'][1][testIdx] temptrain = data['temp'][trainIdx] temptest = data['temp'][testIdx] sbs1train = data['sbses'][0][trainIdx] sbs1test = data['sbses'][0][testIdx] sbs2train = data['sbses'][1][trainIdx] sbs2test = data['sbses'][1][testIdx] Yldtrain, Yldtest = data['yield'][trainIdx], data['yield'][testIdx] eachEpochData=[] #model.set_weights(initWeights) model.set_weights( [randInit(shape=x) for x in initShapes] ) #for epochidx in range(epochs): inputTrain = [ solvent1train, base1train, ligand1train, ligand2train, temptrain, sbs1train, sbs2train] inputTest = [solvent1test, base1test, ligand1test, ligand2test, temptest, sbs1test, sbs2test] #history=model.fit(inputTrain, Yldtrain, epochs=epochs, batch_size=20, shuffle=True, validation_data=(inputTest, Yldtest), verbose=2) #histories.append( history.history) eachEpochData=[] for epochidx in range(epochs): model.fit(inputTrain, Yldtrain, epochs=1, batch_size=20, shuffle=True, verbose=2, validation_data=(inputTest, Yldtest)) topN = [] MAE = [] yieldRange = [] for testidx in testIdx: thisSolvClasses = numpy.zeros((klas1*klas2, 1)) thisBasesClasses = numpy.zeros((klas1*klas2, 1)) thisSolvClasses[0][0]= data['solvents'][0][testidx] thisBasesClasses[0][0]= data['bases'][0][testidx] thisLigand1 = numpy.array([ [data['ligands'][0][testidx],] for x in range(klas1*klas2)]) thisLigand2 = numpy.array([ [data['ligands'][1][testidx],] for x in range(klas1*klas2)]) thisTemp = numpy.array([ [data['temp'][testidx],] for x in range(klas1*klas2)]) thisSbs1 = numpy.array([ data['sbses'][0][testidx] for x in range(klas1*klas2)]) thisSbs2 = numpy.array([ data['sbses'][1][testidx] for x in range(klas1*klas2)]) pos =1 #print("XXX",data['solvents'][0][testidx], data['bases'][0][testidx]) for i in range(1, klas1+1): for j in range(1,klas2+1): if abs(i - data['solvents'][0][testidx]) < 0.01 and abs(j - data['bases'][0][testidx]) < 0.01: continue #print(i,j) thisSolvClasses[pos][0] = i thisBasesClasses[pos][0] = j pos +=1 result2 = model.predict( [thisSolvClasses, thisBasesClasses, thisLigand1, thisLigand2, thisTemp, thisSbs1, thisSbs2 ]) MAE.append( float(abs(result2[0]- data['yield'][testidx])) ) diffY = float(max(result2))-float(min(result2)) #print("allY", [x[0] for x in result2]) #print("AX", allx) resorted = sorted([ (x,i) for i,x in enumerate(result2)], reverse=True) #print("RE", resorted) res=[i for i,x in enumerate(resorted) if x[1] == 0] #print("RE", result2, res) #raise topN.append(res[0] ) yieldRange.append( diffY) topNproc=[] for i in range(1, 6): s1top= len([s for s in topN if s <=i])/ len(topN) topNproc.append( s1top ) #print("YILE RAGE", yieldRange) eachEpochData.append( (statistics.mean(MAE), tuple(topNproc), statistics.mean(yieldRange) ) ) print("last epoch", eachEpochData[-1]) eachFoldData.append(eachEpochData) for epochid in range(epochs): thisEpoch= [ oneFold[epochid] for oneFold in eachFoldData] topN=[ fold[1] for fold in thisEpoch] aveMAE = statistics.mean([x[0] for x in thisEpoch]) stdMAE = 
statistics.stdev([x[0] for x in thisEpoch]) avgYieldRange = statistics.mean([x[2] for x in thisEpoch]) topNproc=[ ] topNstdev = [] for i in range( len(topN[0])): topNproc.append( statistics.mean([fold[i] for fold in topN ]) ) topNstdev.append( statistics.stdev([fold[i] for fold in topN ]) ) print(epochid+1, "MAE", aveMAE, stdMAE, "TOPN", topNproc, topNstdev, "avgYrange", avgYieldRange) #for i in range(epochs): # print("epoch", i, statistics.mean([f['val_mean_absolute_error'][i] for f in histories]), "stdev", statistics.stdev([f['val_mean_absolute_error'][i] for f in histories]) ) def parseArgs(): import argparse parser = argparse.ArgumentParser() parser.add_argument('--input', required=True, type=str) parser.add_argument('--w1', required=True, type=int) parser.add_argument('--w2', required=True, type=int) args = parser.parse_args() return args if __name__ == "__main__": arg=parseArgs() print("ARGS", arg) data =parseData( arg.input) model=makeModel( 512, wide1=arg.w1, wide2=arg.w2 ) training( data, model, epochs=30)
[]
[]
[ "CUDA_VISIBLE_DEVICES", "TF_CPP_MIN_LOG_LEVEL" ]
[]
["CUDA_VISIBLE_DEVICES", "TF_CPP_MIN_LOG_LEVEL"]
python
2
0
pkg/v1/cli/command/core/root.go
// Copyright 2021 VMware, Inc. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package core import ( "fmt" "os" "strings" "time" "github.com/aunum/log" "github.com/briandowns/spinner" "github.com/logrusorgru/aurora" "github.com/spf13/cobra" "github.com/vmware-tanzu/tanzu-framework/apis/cli/v1alpha1" "github.com/vmware-tanzu/tanzu-framework/pkg/v1/cli" "github.com/vmware-tanzu/tanzu-framework/pkg/v1/cli/pluginmanager" "github.com/vmware-tanzu/tanzu-framework/pkg/v1/config" ) // RootCmd is the core root Tanzu command var RootCmd = &cobra.Command{ Use: "tanzu", } var ( noInit bool color = true forceNoInit = "false" // a string variable so as to be overridable via linker flag ) // NewRootCmd creates a root command. func NewRootCmd() (*cobra.Command, error) { u := cli.NewMainUsage() RootCmd.SetUsageFunc(u.Func()) ni := os.Getenv("TANZU_CLI_NO_INIT") if ni != "" || strings.EqualFold(forceNoInit, "true") { noInit = true } if os.Getenv("TANZU_CLI_NO_COLOR") != "" { color = false } au := aurora.NewAurora(color) RootCmd.Short = au.Bold(`Tanzu CLI`).String() // TODO (pbarker): silencing usage for now as we are getting double usage from plugins on errors RootCmd.SilenceUsage = true RootCmd.AddCommand( pluginCmd, initCmd, updateCmd, versionCmd, completionCmd, configCmd, genAllDocsCmd, ) plugins, err := getAvailablePlugins() if err != nil { return nil, err } if err = config.CopyLegacyConfigDir(); err != nil { return nil, fmt.Errorf("failed to copy legacy configuration directory to new location: %w", err) } // If context-aware-discovery is not enabled // check that all plugins in the core distro are installed or do so. if !config.IsFeatureActivated(config.FeatureContextAwareDiscovery) { plugins, err = checkAndInstallMissingPlugins(plugins) if err != nil { return nil, err } } for _, plugin := range plugins { RootCmd.AddCommand(cli.GetCmd(plugin)) } duplicateAliasWarning() // Flag parsing must be deactivated because the root plugin won't know about all flags. RootCmd.DisableFlagParsing = true return RootCmd, nil } func getAvailablePlugins() ([]*v1alpha1.PluginDescriptor, error) { plugins := make([]*v1alpha1.PluginDescriptor, 0) var err error if config.IsFeatureActivated(config.FeatureContextAwareDiscovery) { currentServerName := "" server, err := config.GetCurrentServer() if err == nil && server != nil { currentServerName = server.Name } serverPlugin, standalonePlugins, err := pluginmanager.InstalledPlugins(currentServerName) if err != nil { return nil, fmt.Errorf("find installed plugins: %w", err) } p := append(serverPlugin, standalonePlugins...) for i := range p { plugins = append(plugins, &p[i]) } } else { plugins, err = cli.ListPlugins() if err != nil { return nil, fmt.Errorf("find available plugins: %w", err) } } return plugins, nil } func checkAndInstallMissingPlugins(plugins []*v1alpha1.PluginDescriptor) ([]*v1alpha1.PluginDescriptor, error) { // check that all plugins in the core distro are installed or do so. if !noInit && !cli.IsDistributionSatisfied(plugins) { s := spinner.New(spinner.CharSets[9], 100*time.Millisecond) if err := s.Color("bgBlack", "bold", "fgWhite"); err != nil { return nil, err } s.Suffix = fmt.Sprintf(" %s", "initializing") s.Start() cfg, err := config.GetClientConfig() if err != nil { log.Fatal(err) } repos := cli.NewMultiRepo(cli.LoadRepositories(cfg)...) 
err = cli.EnsureDistro(repos) if err != nil { return nil, err } plugins, err = cli.ListPlugins() if err != nil { return nil, fmt.Errorf("find available plugins: %w", err) } s.Stop() } return plugins, nil } func duplicateAliasWarning() { var aliasMap = make(map[string][]string) for _, command := range RootCmd.Commands() { for _, alias := range command.Aliases { aliases, ok := aliasMap[alias] if !ok { aliasMap[alias] = []string{command.Name()} } else { aliasMap[alias] = append(aliases, command.Name()) } } } for alias, plugins := range aliasMap { if len(plugins) > 1 { fmt.Fprintf(os.Stderr, "Warning, the alias %s is duplicated across plugins: %s\n\n", alias, strings.Join(plugins, ", ")) } } } // Execute executes the CLI. func Execute() error { root, err := NewRootCmd() if err != nil { return err } return root.Execute() }
[ "\"TANZU_CLI_NO_INIT\"", "\"TANZU_CLI_NO_COLOR\"" ]
[]
[ "TANZU_CLI_NO_COLOR", "TANZU_CLI_NO_INIT" ]
[]
["TANZU_CLI_NO_COLOR", "TANZU_CLI_NO_INIT"]
go
2
0
simulation/bank/sim_test.go
package simulation

import (
	"encoding/json"
	"math/rand"
	"testing"

	sdk "github.com/irisnet/irishub/types"

	"github.com/irisnet/irishub/modules/bank"
	"github.com/irisnet/irishub/simulation/mock"
	"github.com/irisnet/irishub/simulation/mock/simulation"
)

func TestBankWithRandomMessages(t *testing.T) {
	mapp := mock.NewApp()

	bank.RegisterCodec(mapp.Cdc)
	mapper := mapp.AccountKeeper
	bankKeeper := mapp.BankKeeper
	mapp.Router().AddRoute("bank", []*sdk.KVStoreKey{mapp.KeyAccount}, bank.NewHandler(bankKeeper))

	err := mapp.CompleteSetup()
	if err != nil {
		panic(err)
	}

	appStateFn := func(r *rand.Rand, accs []simulation.Account) json.RawMessage {
		simulation.RandomSetGenesis(r, mapp, accs, []string{"iris-atto"})
		return json.RawMessage("{}")
	}

	simulation.Simulate(
		t, mapp.BaseApp, appStateFn,
		[]simulation.WeightedOperation{
			{1, SingleInputSendMsg(mapper, bankKeeper)},
		},
		[]simulation.RandSetup{},
		[]simulation.Invariant{
			NonnegativeBalanceInvariant(mapper),
			TotalCoinsInvariant(mapper, func() sdk.Coins { return mapp.TotalCoinsSupply }),
		},
		30, 60,
		false,
	)
}
[]
[]
[]
[]
[]
go
null
null
repo2docker/app.py
"""repo2docker: convert git repositories into jupyter-suitable docker images Images produced by repo2docker can be used with Jupyter notebooks standalone or with BinderHub. Usage: python -m repo2docker https://github.com/you/your-repo """ import argparse import json import sys import logging import os import pwd import shutil import tempfile import time import docker from urllib.parse import urlparse from docker.utils import kwargs_from_env from docker.errors import DockerException import escapism from pythonjsonlogger import jsonlogger from traitlets import Any, Dict, Int, List, Unicode, Bool, default from traitlets.config import Application from . import __version__ from .buildpacks import ( PythonBuildPack, DockerBuildPack, LegacyBinderDockerBuildPack, CondaBuildPack, JuliaBuildPack, RBuildPack, NixBuildPack ) from . import contentproviders from .utils import ByteSpecification, chdir class Repo2Docker(Application): """An application for converting git repositories to docker images""" name = 'jupyter-repo2docker' version = __version__ description = __doc__ @default('log_level') def _default_log_level(self): """The application's default log level""" return logging.INFO git_workdir = Unicode( None, config=True, allow_none=True, help=""" Working directory to use for check out of git repositories. The default is to use the system's temporary directory. Should be somewhere ephemeral, such as /tmp. """ ) subdir = Unicode( '', config=True, help=""" Subdirectory of the git repository to examine. Defaults to ''. """ ) cache_from = List( [], config=True, help=""" List of images to try & re-use cached image layers from. Docker only tries to re-use image layers from images built locally, not pulled from a registry. We can ask it to explicitly re-use layers from non-locally built images by through the 'cache_from' parameter. """ ) buildpacks = List( [ LegacyBinderDockerBuildPack, DockerBuildPack, JuliaBuildPack, NixBuildPack, RBuildPack, CondaBuildPack, PythonBuildPack, ], config=True, help=""" Ordered list of BuildPacks to try when building a git repository. """ ) default_buildpack = Any( PythonBuildPack, config=True, help=""" The default build pack to use when no other buildpacks are found. """ ) # Git is our content provider of last resort. This is to maintain the # old behaviour when git and local directories were the only supported # content providers. We can detect local directories from the path, but # detecting if something will successfully `git clone` is very hard if all # you can do is look at the path/URL to it. content_providers = List( [ contentproviders.Local, contentproviders.Git, ], config=True, help=""" Ordered list by priority of ContentProviders to try in turn to fetch the contents specified by the user. """ ) build_memory_limit = ByteSpecification( 0, help=""" Total memory that can be used by the docker image building process. Set to 0 for no limits. """, config=True ) volumes = Dict( {}, help=""" Volumes to mount when running the container. Only used when running, not during build process! Use a key-value pair, with the key being the volume source & value being the destination volume. Both source and destination can be relative. Source is resolved relative to the current working directory on the host, and destination is resolved relative to the working directory of the image - ($HOME by default) """, config=True ) user_id = Int( help=""" UID of the user to create inside the built image. Should be a uid that is not currently used by anything in the image. 
Defaults to uid of currently running user, since that is the most common case when running r2d manually. Might not affect Dockerfile builds. """, config=True ) @default('user_id') def _user_id_default(self): """ Default user_id to current running user. """ return os.geteuid() user_name = Unicode( 'jovyan', help=""" Username of the user to create inside the built image. Should be a username that is not currently used by anything in the image, and should conform to the restrictions on user names for Linux. Defaults to username of currently running user, since that is the most common case when running repo2docker manually. """, config=True ) @default('user_name') def _user_name_default(self): """ Default user_name to current running user. """ return pwd.getpwuid(os.getuid()).pw_name appendix = Unicode( config=True, help=""" Appendix of Dockerfile commands to run at the end of the build. Can be used to customize the resulting image after all standard build steps finish. """ ) json_logs = Bool( False, help=""" Log output in structured JSON format. Useful when stdout is consumed by other tools """, config=True ) repo = Unicode( ".", help=""" Specification of repository to build image for. Could be local path or git URL. """, config=True ) ref = Unicode( None, help=""" Git ref that should be built. If repo is a git repository, this ref is checked out in a local clone before repository is built. """, config=True, allow_none=True ) cleanup_checkout = Bool( False, help=""" Delete source repository after building is done. Useful when repo2docker is doing the git cloning """, config=True ) output_image_spec = Unicode( "", help=""" Docker Image name:tag to tag the built image with. Required parameter. """, config=True ) push = Bool( False, help=""" Set to true to push docker image after building """, config=True ) run = Bool( False, help=""" Run docker image after building """, config=True ) # FIXME: Refactor class to be able to do --no-build without needing # deep support for it inside other code dry_run = Bool( False, help=""" Do not actually build the docker image, just simulate it. """, config=True ) # FIXME: Refactor classes to separate build & run steps run_cmd = List( [], help=""" Command to run when running the container When left empty, a jupyter notebook is run. """, config=True ) all_ports = Bool( False, help=""" Publish all declared ports from container whiel running. Equivalent to -P option to docker run """, config=True ) ports = Dict( {}, help=""" Port mappings to establish when running the container. Equivalent to -p {key}:{value} options to docker run. {key} refers to port inside container, and {value} refers to port / host:port in the host """, config=True ) environment = List( [], help=""" Environment variables to set when running the built image. Each item must be a string formatted as KEY=VALUE """, config=True ) target_repo_dir = Unicode( '', help=""" Path inside the image where contents of the repositories are copied to. Defaults to ${HOME} if not set """, config=True ) def fetch(self, url, ref, checkout_path): """Fetch the contents of `url` and place it in `checkout_path`. The `ref` parameter specifies what "version" of the contents should be fetched. In the case of a git repository `ref` is the SHA-1 of a commit. Iterate through possible content providers until a valid provider, based on URL, is found. 
""" picked_content_provider = None for ContentProvider in self.content_providers: cp = ContentProvider() spec = cp.detect(url, ref=ref) if spec is not None: picked_content_provider = cp self.log.info("Picked {cp} content " "provider.\n".format(cp=cp.__class__.__name__)) break if picked_content_provider is None: self.log.error("No matching content provider found for " "{url}.".format(url=url)) for log_line in picked_content_provider.fetch( spec, checkout_path, yield_output=self.json_logs): self.log.info(log_line, extra=dict(phase='fetching')) if not self.output_image_spec: self.output_image_spec = ( 'r2d' + escapism.escape(self.repo, escape_char='-').lower() ) # if we are building from a subdirectory include that in the # image name so we can tell builds from different sub-directories # apart. if self.subdir: self.output_image_spec += ( escapism.escape(self.subdir, escape_char='-').lower() ) if picked_content_provider.content_id is not None: self.output_image_spec += picked_content_provider.content_id else: self.output_image_spec += str(int(time.time())) def json_excepthook(self, etype, evalue, traceback): """Called on an uncaught exception when using json logging Avoids non-JSON output on errors when using --json-logs """ self.log.error("Error during build: %s", evalue, exc_info=(etype, evalue, traceback), extra=dict(phase='failed')) def initialize(self): """Init repo2docker configuration before start""" # FIXME: Remove this function, move it to setters / traitlet reactors if self.json_logs: # register JSON excepthook to avoid non-JSON output on errors sys.excepthook = self.json_excepthook # Need to reset existing handlers, or we repeat messages logHandler = logging.StreamHandler() formatter = jsonlogger.JsonFormatter() logHandler.setFormatter(formatter) self.log = logging.getLogger("repo2docker") self.log.handlers = [] self.log.addHandler(logHandler) self.log.setLevel(logging.INFO) else: # due to json logger stuff above, # our log messages include carriage returns, newlines, etc. 
# remove the additional newline from the stream handler self.log.handlers[0].terminator = '' # We don't want a [Repo2Docker] on all messages self.log.handlers[0].formatter = logging.Formatter( fmt='%(message)s' ) if self.dry_run and (self.run or self.push): raise ValueError("Cannot push or run image if we are not building it") if self.volumes and not self.run: raise ValueError("Cannot mount volumes if container is not run") def push_image(self): """Push docker image to registry""" client = docker.APIClient(version='auto', **kwargs_from_env()) # Build a progress setup for each layer, and only emit per-layer # info every 1.5s layers = {} last_emit_time = time.time() for lines in client.push(self.output_image_spec, stream=True): #print('\n' + lines.decode('utf-8')) lineS = lines.decode('utf-8').splitlines() #print(lineS) for line in lineS: progress = json.loads(line) if 'error' in progress: self.log.error(progress['error'], extra=dict(phase='failed')) raise docker.errors.ImageLoadError(progress['error']) if 'id' not in progress: continue if 'progressDetail' in progress and progress['progressDetail']: layers[progress['id']] = progress['progressDetail'] progressbar = progress['progress'] sys.stdout.write(progressbar + '\r') sys.stdout.flush() else: layers[progress['id']] = progress['status'] if time.time() - last_emit_time > 1.5: self.log.info('', extra=dict(progress=layers, phase='pushing')) last_emit_time = time.time() self.log.info('Successfully pushed {}'.format(self.output_image_spec), extra=dict(phase='pushing')) def run_image(self): """Run docker container from built image and wait for it to finish. """ container = self.start_container() self.wait_for_container(container) def start_container(self): """Start docker container from built image Returns running container """ client = docker.from_env(version='auto') docker_host = os.environ.get('DOCKER_HOST') if docker_host: host_name = urlparse(docker_host).hostname else: host_name = '127.0.0.1' self.hostname = host_name if not self.run_cmd: port = str(self._get_free_port()) self.port = port # To use the option --NotebookApp.custom_display_url # make sure the base-notebook image is updated: # docker pull jupyter/base-notebook run_cmd = [ 'jupyter', 'notebook', '--ip', '0.0.0.0', '--port', port, "--NotebookApp.custom_display_url=http://{}:{}".format(host_name, port), ] ports = {'%s/tcp' % port: port} else: # run_cmd given by user, if port is also given then pass it on run_cmd = self.run_cmd if self.ports: ports = self.ports else: ports = {} # store ports on self so they can be retrieved in tests self.ports = ports container_volumes = {} if self.volumes: api_client = docker.APIClient( version='auto', **docker.utils.kwargs_from_env() ) image = api_client.inspect_image(self.output_image_spec) image_workdir = image['ContainerConfig']['WorkingDir'] for k, v in self.volumes.items(): container_volumes[os.path.abspath(k)] = { 'bind': v if v.startswith('/') else os.path.join(image_workdir, v), 'mode': 'rw' } container = client.containers.run( self.output_image_spec, publish_all_ports=self.all_ports, ports=ports, detach=True, command=run_cmd, volumes=container_volumes, environment=self.environment ) while container.status == 'created': time.sleep(0.5) container.reload() return container def wait_for_container(self, container): """Wait for a container to finish Displaying logs while it's running """ try: for line in container.logs(stream=True): self.log.info(line.decode('utf-8'), extra=dict(phase='running')) finally: container.reload() if 
container.status == 'running': self.log.info('Stopping container...\n', extra=dict(phase='running')) container.kill() exit_code = container.attrs['State']['ExitCode'] container.remove() if exit_code: sys.exit(exit_code) def _get_free_port(self): """ Hacky method to get a free random port on local host """ import socket s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind(("", 0)) port = s.getsockname()[1] s.close() return port def find_image(self): # if this is a dry run it is Ok for dockerd to be unreachable so we # always return False for dry runs. if self.dry_run: return False # check if we already have an image for this content client = docker.APIClient(version='auto', **kwargs_from_env()) for image in client.images(): if image['RepoTags'] is not None: for tag in image['RepoTags']: if tag == self.output_image_spec + ":latest": return True return False def build(self): """ Build docker image """ # Check if r2d can connect to docker daemon if not self.dry_run: try: docker_client = docker.APIClient(version='auto', **kwargs_from_env()) except DockerException as e: self.log.exception(e) raise # If the source to be executed is a directory, continue using the # directory. In the case of a local directory, it is used as both the # source and target. Reusing a local directory seems better than # making a copy of it as it might contain large files that would be # expensive to copy. if os.path.isdir(self.repo): checkout_path = self.repo else: if self.git_workdir is None: checkout_path = tempfile.mkdtemp(prefix='repo2docker') else: checkout_path = self.git_workdir try: self.fetch(self.repo, self.ref, checkout_path) if self.find_image(): self.log.info("Reusing existing image ({}), not " "building.".format(self.output_image_spec)) # no need to build, so skip to the end by `return`ing here # this will still execute the finally clause and let's us # avoid having to indent the build code by an extra level return if self.subdir: checkout_path = os.path.join(checkout_path, self.subdir) if not os.path.isdir(checkout_path): self.log.error('Subdirectory %s does not exist', self.subdir, extra=dict(phase='failure')) raise FileNotFoundError('Could not find {}'.format(checkout_path)) with chdir(checkout_path): for BP in self.buildpacks: bp = BP() if bp.detect(): picked_buildpack = bp break else: picked_buildpack = self.default_buildpack() picked_buildpack.appendix = self.appendix # Add metadata labels picked_buildpack.labels['repo2docker.version'] = self.version repo_label = 'local' if os.path.isdir(self.repo) else self.repo picked_buildpack.labels['repo2docker.repo'] = repo_label picked_buildpack.labels['repo2docker.ref'] = self.ref self.log.debug(picked_buildpack.render(), extra=dict(phase='building')) if not self.dry_run: build_args = { 'NB_USER': self.user_name, 'NB_UID': str(self.user_id), } if self.target_repo_dir: build_args['REPO_DIR'] = self.target_repo_dir self.log.info('Using %s builder\n', bp.__class__.__name__, extra=dict(phase='building')) for l in picked_buildpack.build(docker_client, self.output_image_spec, self.build_memory_limit, build_args, self.cache_from): if 'stream' in l: self.log.info(l['stream'], extra=dict(phase='building')) elif 'error' in l: self.log.info(l['error'], extra=dict(phase='failure')) raise docker.errors.BuildError(l['error'], build_log='') elif 'status' in l: self.log.info('Fetching base image...\r', extra=dict(phase='building')) else: self.log.info(json.dumps(l), extra=dict(phase='building')) finally: # Cleanup checkout if necessary if self.cleanup_checkout: 
shutil.rmtree(checkout_path, ignore_errors=True) def start(self): self.build() if self.push: self.push_image() if self.run: self.run_image()
[]
[]
[ "DOCKER_HOST" ]
[]
["DOCKER_HOST"]
python
1
0
staging/operator-lifecycle-manager/test/e2e/e2e_test.go
package e2e import ( "context" "flag" "fmt" "os" "path" "testing" "time" . "github.com/onsi/ginkgo" "github.com/onsi/ginkgo/config" "github.com/onsi/ginkgo/reporters" . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "github.com/operator-framework/api/pkg/operators/v1" "github.com/operator-framework/operator-lifecycle-manager/test/e2e/ctx" ) var ( kubeConfigPath = flag.String( "kubeconfig", "", "path to the kubeconfig file") namespace = flag.String( "namespace", "", "namespace where tests will run") olmNamespace = flag.String( "olmNamespace", "", "namespace where olm is running") communityOperators = flag.String( "communityOperators", "quay.io/operator-framework/upstream-community-operators@sha256:098457dc5e0b6ca9599bd0e7a67809f8eca397907ca4d93597380511db478fec", "reference to upstream-community-operators image") dummyImage = flag.String( "dummyImage", "bitnami/nginx:latest", "dummy image to treat as an operator in tests") testNamespace = "" operatorNamespace = "" communityOperatorsImage = "" ) func TestEndToEnd(t *testing.T) { RegisterFailHandler(Fail) SetDefaultEventuallyTimeout(1 * time.Minute) SetDefaultEventuallyPollingInterval(1 * time.Second) SetDefaultConsistentlyDuration(30 * time.Second) SetDefaultConsistentlyPollingInterval(1 * time.Second) if junitDir := os.Getenv("JUNIT_DIRECTORY"); junitDir != "" { junitReporter := reporters.NewJUnitReporter(path.Join(junitDir, fmt.Sprintf("junit_e2e_%02d.xml", config.GinkgoConfig.ParallelNode))) RunSpecsWithDefaultAndCustomReporters(t, "End-to-end", []Reporter{junitReporter}) } else { RunSpecs(t, "End-to-end") } } var deprovision func() = func() {} // This function initializes a client which is used to create an operator group for a given namespace var _ = BeforeSuite(func() { if kubeConfigPath != nil && *kubeConfigPath != "" { // This flag can be deprecated in favor of the kubeconfig provisioner: os.Setenv("KUBECONFIG", *kubeConfigPath) } testNamespace = *namespace operatorNamespace = *olmNamespace communityOperatorsImage = *communityOperators deprovision = ctx.MustProvision(ctx.Ctx()) ctx.MustInstall(ctx.Ctx()) groups, err := ctx.Ctx().OperatorClient().OperatorsV1().OperatorGroups(testNamespace).List(context.TODO(), metav1.ListOptions{}) if err != nil { panic(err) } if len(groups.Items) == 0 { _, err = ctx.Ctx().OperatorClient().OperatorsV1().OperatorGroups(testNamespace).Create(context.TODO(), &v1.OperatorGroup{ ObjectMeta: metav1.ObjectMeta{ Name: "opgroup", Namespace: testNamespace, }, }, metav1.CreateOptions{}) if err != nil { panic(err) } } }) var _ = AfterSuite(func() { deprovision() })
[ "\"JUNIT_DIRECTORY\"" ]
[]
[ "JUNIT_DIRECTORY" ]
[]
["JUNIT_DIRECTORY"]
go
1
0
vendor/github.com/terraform-providers/terraform-provider-vsphere/vsphere/provider.go
package vsphere import ( "os" "path/filepath" "time" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/terraform" ) // defaultAPITimeout is a default timeout value that is passed to functions // requiring contexts, and other various waiters. const defaultAPITimeout = time.Minute * 5 // Provider returns a terraform.ResourceProvider. func Provider() terraform.ResourceProvider { return &schema.Provider{ Schema: map[string]*schema.Schema{ "user": { Type: schema.TypeString, Required: true, DefaultFunc: schema.EnvDefaultFunc("VSPHERE_USER", nil), Description: "The user name for vSphere API operations.", }, "password": { Type: schema.TypeString, Required: true, DefaultFunc: schema.EnvDefaultFunc("VSPHERE_PASSWORD", nil), Description: "The user password for vSphere API operations.", }, "vsphere_server": { Type: schema.TypeString, Optional: true, DefaultFunc: schema.EnvDefaultFunc("VSPHERE_SERVER", nil), Description: "The vSphere Server name for vSphere API operations.", }, "allow_unverified_ssl": { Type: schema.TypeBool, Optional: true, DefaultFunc: schema.EnvDefaultFunc("VSPHERE_ALLOW_UNVERIFIED_SSL", false), Description: "If set, VMware vSphere client will permit unverifiable SSL certificates.", }, "vcenter_server": { Type: schema.TypeString, Optional: true, DefaultFunc: schema.EnvDefaultFunc("VSPHERE_VCENTER", nil), Deprecated: "This field has been renamed to vsphere_server.", }, "client_debug": { Type: schema.TypeBool, Optional: true, DefaultFunc: schema.EnvDefaultFunc("VSPHERE_CLIENT_DEBUG", false), Description: "govmomi debug", }, "client_debug_path_run": { Type: schema.TypeString, Optional: true, DefaultFunc: schema.EnvDefaultFunc("VSPHERE_CLIENT_DEBUG_PATH_RUN", ""), Description: "govmomi debug path for a single run", }, "client_debug_path": { Type: schema.TypeString, Optional: true, DefaultFunc: schema.EnvDefaultFunc("VSPHERE_CLIENT_DEBUG_PATH", ""), Description: "govmomi debug path for debug", }, "persist_session": { Type: schema.TypeBool, Optional: true, DefaultFunc: schema.EnvDefaultFunc("VSPHERE_PERSIST_SESSION", false), Description: "Persist vSphere client sessions to disk", }, "vim_session_path": { Type: schema.TypeString, Optional: true, DefaultFunc: schema.EnvDefaultFunc("VSPHERE_VIM_SESSION_PATH", filepath.Join(os.Getenv("HOME"), ".govmomi", "sessions")), Description: "The directory to save vSphere SOAP API sessions to", }, "rest_session_path": { Type: schema.TypeString, Optional: true, DefaultFunc: schema.EnvDefaultFunc("VSPHERE_REST_SESSION_PATH", filepath.Join(os.Getenv("HOME"), ".govmomi", "rest_sessions")), Description: "The directory to save vSphere REST API sessions to", }, "vim_keep_alive": { Type: schema.TypeInt, Optional: true, DefaultFunc: schema.EnvDefaultFunc("VSPHERE_VIM_KEEP_ALIVE", 10), Description: "Keep alive interval for the VIM session in minutes", }, }, ResourcesMap: map[string]*schema.Resource{ "vsphere_compute_cluster": resourceVSphereComputeCluster(), "vsphere_compute_cluster_host_group": resourceVSphereComputeClusterHostGroup(), "vsphere_compute_cluster_vm_affinity_rule": resourceVSphereComputeClusterVMAffinityRule(), "vsphere_compute_cluster_vm_anti_affinity_rule": resourceVSphereComputeClusterVMAntiAffinityRule(), "vsphere_compute_cluster_vm_dependency_rule": resourceVSphereComputeClusterVMDependencyRule(), "vsphere_compute_cluster_vm_group": resourceVSphereComputeClusterVMGroup(), "vsphere_compute_cluster_vm_host_rule": resourceVSphereComputeClusterVMHostRule(), "vsphere_content_library": 
resourceVSphereContentLibrary(), "vsphere_content_library_item": resourceVSphereContentLibraryItem(), "vsphere_custom_attribute": resourceVSphereCustomAttribute(), "vsphere_datacenter": resourceVSphereDatacenter(), "vsphere_datastore_cluster": resourceVSphereDatastoreCluster(), "vsphere_datastore_cluster_vm_anti_affinity_rule": resourceVSphereDatastoreClusterVMAntiAffinityRule(), "vsphere_distributed_port_group": resourceVSphereDistributedPortGroup(), "vsphere_distributed_virtual_switch": resourceVSphereDistributedVirtualSwitch(), "vsphere_drs_vm_override": resourceVSphereDRSVMOverride(), "vsphere_dpm_host_override": resourceVSphereDPMHostOverride(), "vsphere_file": resourceVSphereFile(), "vsphere_folder": resourceVSphereFolder(), "vsphere_ha_vm_override": resourceVSphereHAVMOverride(), "vsphere_host_port_group": resourceVSphereHostPortGroup(), "vsphere_host_virtual_switch": resourceVSphereHostVirtualSwitch(), "vsphere_license": resourceVSphereLicense(), "vsphere_resource_pool": resourceVSphereResourcePool(), "vsphere_tag": resourceVSphereTag(), "vsphere_tag_category": resourceVSphereTagCategory(), "vsphere_virtual_disk": resourceVSphereVirtualDisk(), "vsphere_virtual_machine": resourceVSphereVirtualMachine(), "vsphere_nas_datastore": resourceVSphereNasDatastore(), "vsphere_storage_drs_vm_override": resourceVSphereStorageDrsVMOverride(), "vsphere_vapp_container": resourceVSphereVAppContainer(), "vsphere_vapp_entity": resourceVSphereVAppEntity(), "vsphere_vmfs_datastore": resourceVSphereVmfsDatastore(), "vsphere_virtual_machine_snapshot": resourceVSphereVirtualMachineSnapshot(), "vsphere_host": resourceVsphereHost(), "vsphere_vnic": resourceVsphereNic(), }, DataSourcesMap: map[string]*schema.Resource{ "vsphere_compute_cluster": dataSourceVSphereComputeCluster(), "vsphere_content_library": dataSourceVSphereContentLibrary(), "vsphere_content_library_item": dataSourceVSphereContentLibraryItem(), "vsphere_custom_attribute": dataSourceVSphereCustomAttribute(), "vsphere_datacenter": dataSourceVSphereDatacenter(), "vsphere_datastore": dataSourceVSphereDatastore(), "vsphere_datastore_cluster": dataSourceVSphereDatastoreCluster(), "vsphere_distributed_virtual_switch": dataSourceVSphereDistributedVirtualSwitch(), "vsphere_folder": dataSourceVSphereFolder(), "vsphere_host": dataSourceVSphereHost(), "vsphere_network": dataSourceVSphereNetwork(), "vsphere_resource_pool": dataSourceVSphereResourcePool(), "vsphere_storage_policy": dataSourceVSphereStoragePolicy(), "vsphere_tag": dataSourceVSphereTag(), "vsphere_tag_category": dataSourceVSphereTagCategory(), "vsphere_vapp_container": dataSourceVSphereVAppContainer(), "vsphere_virtual_machine": dataSourceVSphereVirtualMachine(), "vsphere_vmfs_disks": dataSourceVSphereVmfsDisks(), }, ConfigureFunc: providerConfigure, } } func providerConfigure(d *schema.ResourceData) (interface{}, error) { c, err := NewConfig(d) if err != nil { return nil, err } return c.Client() }
[ "\"HOME\"", "\"HOME\"" ]
[]
[ "HOME" ]
[]
["HOME"]
go
1
0
main.go
package main

import (
	"log"
	"net/http"
	"os"
)

var (
	port = os.Getenv("PORT")
)

// cors adds required headers to responses such that direct access works.
//
// These are not required if using "proxy" access.
func cors(f http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Access-Control-Allow-Headers", "accept, content-type")
		w.Header().Set("Access-Control-Allow-Methods", "POST")
		w.Header().Set("Access-Control-Allow-Origin", "*")
		f(w, r)
	}
}

func main() {
	if port == "" {
		port = "4444"
	}
	srv := &http.Server{Addr: ":" + port}

	s, err := NewSever()
	if err != nil {
		log.Fatal("error creating server", err)
	}

	mux := http.NewServeMux()
	mux.HandleFunc("/", cors(s.handle))
	srv.Handler = mux

	// ListenAndServe serves plain HTTP, so log the http:// scheme.
	log.Printf("Serving on http://0.0.0.0:" + port)
	log.Fatal(srv.ListenAndServe())
}
[ "\"PORT\"" ]
[]
[ "PORT" ]
[]
["PORT"]
go
1
0
lib/http/triv.go
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build ignore package main import ( "bytes" "expvar" "flag" "fmt" "io" "log" "os" "os/exec" "strconv" "sync" "github.com/zmap/zgrab2/lib/http" ) // hello world, the web server var helloRequests = expvar.NewInt("hello-requests") func HelloServer(w http.ResponseWriter, req *http.Request) { helloRequests.Add(1) io.WriteString(w, "hello, world!\n") } // Simple counter server. POSTing to it will set the value. type Counter struct { mu sync.Mutex // protects n n int } // This makes Counter satisfy the expvar.Var interface, so we can export // it directly. func (ctr *Counter) String() string { ctr.mu.Lock() defer ctr.mu.Unlock() return fmt.Sprintf("%d", ctr.n) } func (ctr *Counter) ServeHTTP(w http.ResponseWriter, req *http.Request) { ctr.mu.Lock() defer ctr.mu.Unlock() switch req.Method { case "GET": ctr.n++ case "POST": buf := new(bytes.Buffer) io.Copy(buf, req.Body) body := buf.String() if n, err := strconv.Atoi(body); err != nil { fmt.Fprintf(w, "bad POST: %v\nbody: [%v]\n", err, body) } else { ctr.n = n fmt.Fprint(w, "counter reset\n") } } fmt.Fprintf(w, "counter = %d\n", ctr.n) } // simple flag server var booleanflag = flag.Bool("boolean", true, "another flag for testing") func FlagServer(w http.ResponseWriter, req *http.Request) { w.Header().Set("Content-Type", "text/plain; charset=utf-8") fmt.Fprint(w, "Flags:\n") flag.VisitAll(func(f *flag.Flag) { if f.Value.String() != f.DefValue { fmt.Fprintf(w, "%s = %s [default = %s]\n", f.Name, f.Value.String(), f.DefValue) } else { fmt.Fprintf(w, "%s = %s\n", f.Name, f.Value.String()) } }) } // simple argument server func ArgServer(w http.ResponseWriter, req *http.Request) { for _, s := range os.Args { fmt.Fprint(w, s, " ") } } // a channel (just for the fun of it) type Chan chan int func ChanCreate() Chan { c := make(Chan) go func(c Chan) { for x := 0; ; x++ { c <- x } }(c) return c } func (ch Chan) ServeHTTP(w http.ResponseWriter, req *http.Request) { io.WriteString(w, fmt.Sprintf("channel send #%d\n", <-ch)) } // exec a program, redirecting output func DateServer(rw http.ResponseWriter, req *http.Request) { rw.Header().Set("Content-Type", "text/plain; charset=utf-8") date, err := exec.Command("/bin/date").Output() if err != nil { http.Error(rw, err.Error(), 500) return } rw.Write(date) } func Logger(w http.ResponseWriter, req *http.Request) { log.Print(req.URL) http.Error(w, "oops", 404) } var webroot = flag.String("root", os.Getenv("HOME"), "web root directory") func main() { flag.Parse() // The counter is published as a variable directly. ctr := new(Counter) expvar.Publish("counter", ctr) http.Handle("/counter", ctr) http.Handle("/", http.HandlerFunc(Logger)) http.Handle("/go/", http.StripPrefix("/go/", http.FileServer(http.Dir(*webroot)))) http.Handle("/chan", ChanCreate()) http.HandleFunc("/flags", FlagServer) http.HandleFunc("/args", ArgServer) http.HandleFunc("/go/hello", HelloServer) http.HandleFunc("/date", DateServer) log.Fatal(http.ListenAndServe(":12345", nil)) }
[ "\"HOME\"" ]
[]
[ "HOME" ]
[]
["HOME"]
go
1
0
scp_client/copy_dir/scp_clientv3.go
/* This script will copy a directory and its contents over to a remote device. One of the challenges is that the directory needed to be created on the remote device prior to copying files over. */ package main import ( "fmt" "golang.org/x/crypto/ssh" "golang.org/x/crypto/ssh/agent" "log" "net" "os" "github.com/tmc/scp" ) func getAgent() (agent.Agent, error) { agentConn, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK")) return agent.NewClient(agentConn), err } func scpFunc(dir, file string) { f := dir + "/" + file src, err := os.Open(f) if err != nil { fmt.Println(err) } src.Close() user := "developer" pass := "C1sco12345" host := "sandbox-iosxe-latest-1.cisco.com:22" client, err := ssh.Dial("tcp", host, &ssh.ClientConfig{ User: user, Auth: []ssh.AuthMethod{ ssh.Password(pass), }, HostKeyCallback: ssh.InsecureIgnoreHostKey(), // FIXME: please be more secure in checking host keys }) if err != nil { log.Fatalln("Failed to dial:", err) } session, err := client.NewSession() if err != nil { log.Fatalln("Failed to create session: " + err.Error()) } dest := src.Name() err = scp.CopyPath(src.Name(), dest, session) if _, err := os.Stat(dest); os.IsNotExist(err) { fmt.Printf("no such file or directory: %s", dest) } else { fmt.Println("success") } } func createDir(dir string) { user := "developer" pass := "C1sco12345" host := "sandbox-iosxe-latest-1.cisco.com:22" client, err := ssh.Dial("tcp", host, &ssh.ClientConfig{ User: user, Auth: []ssh.AuthMethod{ ssh.Password(pass), }, HostKeyCallback: ssh.InsecureIgnoreHostKey(), // FIXME: please be more secure in checking host keys }) if err != nil { log.Fatalln("Failed to dial:", err) } session, err := client.NewSession() if err != nil { log.Fatalln("Failed to create session: " + err.Error()) } stdin, _ := session.StdinPipe() session.Stdout = os.Stdout session.Stderr = os.Stderr session.Shell() fmt.Fprintf(stdin, "mkdir "+dir+"\n") fmt.Fprintf(stdin, "\n\n\n") fmt.Fprintf(stdin, "exit\n") session.Wait() session.Close() } func main() { dir := "test_dir" createDir(dir) files, err := os.ReadDir(dir) if err != nil { log.Fatal(err) } for _, file := range files { fmt.Println(file.Name()) scpFunc(dir, file.Name()) } } /* pi@raspberrypi:~/Code_folder/go_folder/netOps/scp_client $ ls -l total 3776 -rw-r--r-- 1 pi pi 257 Feb 25 22:40 go.mod -rw-r--r-- 1 pi pi 1384 Feb 25 22:40 go.sum -rwxr-xr-x 1 pi pi 3842756 Feb 25 22:55 scp_client -rw-r--r-- 1 pi pi 2095 Feb 25 23:57 scp_clientv3.go drwxr-xr-x 2 pi pi 4096 Feb 25 23:12 test_dir pi@raspberrypi:~/Code_folder/go_folder/netOps/scp_client $ cd test_dir/ pi@raspberrypi:~/Code_folder/go_folder/netOps/scp_client/test_dir $ ls -l total 12 -rw-r--r-- 1 pi pi 8 Feb 25 23:12 text1.txt -rw-r--r-- 1 pi pi 7 Feb 25 22:38 text2.txt -rw-r--r-- 1 pi pi 7 Feb 25 22:39 text3.txt pi@raspberrypi:~/Code_folder/go_folder/netOps/scp_client/test_dir $ pi@raspberrypi:~/Code_folder/go_folder/netOps/scp_client $ go run scp_clientv3.go Welcome to the DevNet Sandbox for CSR1000v and IOS XE The following programmability features are already enabled: - NETCONF - RESTCONF Thanks for stopping by. csr1000v-1#mkdir test_dir Create directory filename [test_dir]? 
Created dir bootflash:/test_dir csr1000v-1# csr1000v-1# csr1000v-1#exit text1.txt success text2.txt success text3.txt success pi@raspberrypi:~/Code_folder/go_folder/netOps/scp_client $ //remote device// csr1000v-1#dir Directory of bootflash:/ 201601 drwx 20480 Feb 26 2021 05:08:57 +00:00 tracelogs 153217 drwx 4096 Feb 26 2021 04:58:12 +00:00 test_dir 241921 drwx 4096 Feb 26 2021 04:47:23 +00:00 exit 80641 drwx 4096 Feb 25 2021 23:49:45 +00:00 .installer 22 -rw- 157 Feb 25 2021 23:49:04 +00:00 csrlxc-cfg.log 137089 drwx 4096 Feb 25 2021 23:49:04 +00:00 license_evlog 19 -rw- 2288 Feb 25 2021 23:49:02 +00:00 cvac.log 18 -rw- 30 Feb 25 2021 23:49:00 +00:00 throughput_monitor_params 15 -rw- 1216 Feb 25 2021 23:48:06 +00:00 mode_event_log 64513 drwx 4096 Sep 1 2020 14:51:38 +00:00 .dbpersist 274177 drwx 4096 Sep 1 2020 14:51:34 +00:00 onep 21 -rw- 16 Sep 1 2020 14:51:32 +00:00 ovf-env.xml.md5 20 -rw- 1 Sep 1 2020 14:51:32 +00:00 .cvac_version 104833 drwx 4096 Sep 1 2020 14:51:29 +00:00 pnp-info 145153 drwx 4096 Sep 1 2020 14:50:48 +00:00 virtual-instance 17 -rwx 1314 Sep 1 2020 14:50:21 +00:00 trustidrootx3_ca.ca 16 -rw- 20109 Sep 1 2020 14:50:21 +00:00 ios_core.p7b 193537 drwx 4096 Sep 1 2020 14:50:18 +00:00 gs_script 40321 drwx 4096 Sep 1 2020 14:50:16 +00:00 core 169345 drwx 4096 Sep 1 2020 14:50:12 +00:00 bootlog_history 161281 drwx 4096 Sep 1 2020 14:50:07 +00:00 .prst_sync 14 -rw- 1105 Sep 1 2020 14:49:08 +00:00 packages.conf 13 -rw- 48321761 Sep 1 2020 14:49:08 +00:00 csr1000v-rpboot.17.03.01a.SPA.pkg 12 -rw- 470611036 Sep 1 2020 14:49:08 +00:00 csr1000v-mono-universalk9.17.03.01a.SPA.pkg 8065 drwx 4096 Sep 1 2020 14:49:03 +00:00 .rollback_timer 11 drwx 16384 Sep 1 2020 14:48:15 +00:00 lost+found 6286540800 bytes total (5434560512 bytes free) csr1000v-1#cd test csr1000v-1#cd test_dir csr1000v-1#dir Directory of bootflash:/test_dir/ 153220 -rw- 7 Feb 26 2021 04:58:12 +00:00 text3.txt 153219 -rw- 7 Feb 26 2021 04:58:11 +00:00 text2.txt 153218 -rw- 8 Feb 26 2021 04:58:10 +00:00 text1.txt 6286540800 bytes total (5434560512 bytes free) csr1000v-1# */
[ "\"SSH_AUTH_SOCK\"" ]
[]
[ "SSH_AUTH_SOCK" ]
[]
["SSH_AUTH_SOCK"]
go
1
0
bot.py
import asyncio import datetime import gettext import os import coc import discord from coc.errors import NotFound t = gettext.translation('bot', 'locale', languages=['ko'], fallback=True) _ = t.gettext print(_('Logging in to COC API...')) email = os.getenv('COC_BOT_EMAIL') password = os.getenv('COC_BOT_PASSWORD') coc_client = coc.login(email, password, client=coc.EventsClient) clan_tag = '#U8YJLRQU' members_last_updated = {} recently_notified_war_clans = set() dc_bot_token = os.getenv('DISCORD_BOT_TOKEN') dc_client = discord.Client() topology_id = 653614701476839450 general_id = 653614701476839453 fellow_id = 662257591581147152 developer_id = 668082046131765248 async def wait_until_ready(): while not dc_client.is_ready(): await asyncio.sleep(60) async def send_message(guild_id, channel_id, message): await wait_until_ready() channel = dc_client.get_guild(guild_id).get_channel(channel_id) await channel.send(message) @coc_client.event async def on_clan_member_join(member, clan): await send_message(topology_id, general_id, _('{} joined the clan! :tada:').format(member.name)) @coc_client.event async def on_clan_member_leave(member, clan): await send_message(topology_id, general_id, _('{} left the clan.').format(member.name)) @coc_client.event async def on_war_state_change(current_state, war): if current_state == 'warEnded': end_msg = _('We won the war! :tada:') \ if war.status == 'won' else _('We can be better next time! :slight_smile:') statistics = '\n'.join( [ '**{}** : {} :star: - {}% ({}/2)'.format( member.name, sum(attack.stars for attack in member.attacks), int(sum(attack.destruction for attack in member.attacks) / 2), len(member.attacks) ) for member in sorted(war.clan.members, key=lambda member: ( sum(attack.stars for attack in member.attacks), sum(attack.destruction for attack in member.attacks), ), reverse=True) ] ) await send_message( topology_id, general_id, '{}\n{}'.format(end_msg, statistics) ) elif current_state == 'preparation': await send_message( topology_id, general_id, _('War versus clan **{}** preparation has started!').format(war.opponent.name) ) elif current_state == 'inWar': await send_message( topology_id, general_id, _('War versus clan **{}** has started! 
:rocket:').format(war.opponent.name) ) async def watch_clan_war(): # Check for regular clan war try: clan_war = await coc_client.get_clan_war(clan_tag) if clan_war.state != 'inWar': # Check for league war league_group = await coc_client.get_league_group(clan_tag) war_coro = [] for war_round in league_group.rounds: for war_id in war_round: if war_id != '#0': war_coro.append(coc_client.get_league_war(war_id)) war_list = await asyncio.gather(*war_coro) for war in war_list: if war.state == 'inWar': if war.clan.tag == clan_tag or war.opponent.tag == clan_tag: clan_war = war war_clan = war.clan if war.clan.tag == clan_tag else war.opponent break else: # Currently not in war return else: war_clan = clan_war.clan except NotFound: # Currently not in war return now = datetime.datetime.utcnow() war_end = clan_war.end_time.time global recently_notified_war_clans war_clans = {clan_war.clan.tag, clan_war.opponent.tag} if war_end - now < datetime.timedelta(hours=1): if recently_notified_war_clans != war_clans: recently_notified_war_clans = war_clans unused_members = {member.name for member in war_clan.members if not member.attacks} discord_members_mentions = ' '.join( ['<@{}>'.format(member.id) for member in dc_client.get_guild(topology_id).members if member.nick in unused_members] ) await send_message( topology_id, general_id, _('Clan war is less than an hour left! :rocket:\n{}').format(discord_members_mentions) ) async def watch_clan_war_periodic(timeout=600): while True: await watch_clan_war() await asyncio.sleep(timeout) def main(): print(_('Fetching clan data...')) loop = asyncio.get_event_loop() clan = loop.run_until_complete(coc_client.get_clan(clan_tag)) print(_('Logging in to Discord API...')) loop.run_until_complete(dc_client.login(dc_bot_token)) asyncio.ensure_future(dc_client.connect()) global members_last_updated members_last_updated = {player.tag: datetime.datetime.now() for player in clan.members} coc_client.add_clan_update(clan_tag) coc_client.add_war_update(clan_tag) asyncio.ensure_future(watch_clan_war_periodic()) print(_('Initialization complete! Starting the bot...')) asyncio.ensure_future(send_message(topology_id, developer_id, _('Hello! Clash of Clans Bot is online.'))) try: loop.run_forever() except KeyboardInterrupt: loop.run_until_complete(dc_client.logout()) loop.run_until_complete(coc_client.http.close()) loop.close() if __name__ == '__main__': main()
[]
[]
[ "COC_BOT_PASSWORD", "COC_BOT_EMAIL", "DISCORD_BOT_TOKEN" ]
[]
["COC_BOT_PASSWORD", "COC_BOT_EMAIL", "DISCORD_BOT_TOKEN"]
python
3
0
cmd/command/handle_binlog/handle_insert.go
package handle_binlog

import (
	"encoding/json"
	"fmt"
	"os"
	"strings"

	"github.com/siddontang/go-mysql/replication"

	"owen2020/app/models"
	"owen2020/cmd/command/handle_binlog/common"
	"owen2020/cmd/command/handle_binlog/store"
)

func handleWriteRowsEventV1(e *replication.BinlogEvent) {
	ev, _ := e.Event.(*replication.RowsEvent)

	if os.Getenv("ENABLE_DATA_STATISTICS") == "yes" {
		go insertRoutineStatistics(ev)
	}

	if os.Getenv("ENABLE_MODEL_STREAM") == "yes" {
		insertRoutineModelStream(ev)
	}
}

func insertRoutineStatistics(ev *replication.RowsEvent) {
	dbName := string(ev.Table.Schema)
	tableName := string(ev.Table.Table)
	store.StatisticsIncrease(dbName, tableName, "", store.INSERT, 1)
}

func insertRoutineModelStream(ev *replication.RowsEvent) {
	dbName := string(ev.Table.Schema)
	tableName := string(ev.Table.Table)

	ok := common.FilterTable(dbName, tableName)
	if !ok {
		fmt.Println("skip write", dbName, ".", tableName)
		return
	}

	var streams []models.DddEventStream
	stream := &models.DddEventStream{}
	stream.DbName = dbName
	stream.TableName = tableName
	stream.TransactionTag = ""
	stream.EventType = 1 // This follows the canal definition; it differs from the original MySQL binlog event type.

	for i := 0; i < len(ev.Rows); i++ {
		var allColumns []string
		var updatedColumns []string
		updatedData := make(map[string]interface{})

		tableSchema := common.DBTables[string(ev.Table.Schema)+"."+string(ev.Table.Table)]
		for idx, value := range ev.Rows[i] {
			allColumns = append(allColumns, tableSchema[idx])
			updatedColumns = append(updatedColumns, tableSchema[idx])
			updatedData[tableSchema[idx]], _ = common.GetStringValue(value)
		}

		stream.Columns = strings.Join(allColumns, ",")
		stream.UpdateColumns = strings.Join(updatedColumns, ",")
		b, _ := json.Marshal(updatedData)
		stream.UpdateValue = string(b)

		streams = append(streams, *stream)
	}

	store.StreamAddRows(streams)
}
[ "\"ENABLE_DATA_STATISTICS\"", "\"ENABLE_MODEL_STREAM\"" ]
[]
[ "ENABLE_DATA_STATISTICS", "ENABLE_MODEL_STREAM" ]
[]
["ENABLE_DATA_STATISTICS", "ENABLE_MODEL_STREAM"]
go
2
0
main.go
package main

import (
	"os"

	"github.com/beaquant/echo-vue/api"
	"github.com/beaquant/echo-vue/config"
	"github.com/beaquant/echo-vue/models"
	"github.com/beaquant/echo-vue/routes"
	"github.com/labstack/echo"
)

func getPort() string {
	port := os.Getenv("PORT")
	if port == "" {
		port = "3000"
	}
	return port
}

func main() {
	e := echo.New()
	db := models.NewSqliteDB("data.db")
	api := api.NewAPI(db)
	routes.NewRoutes(api, e)
	//e.Use(middleware.Logger())
	//e.Use(middleware.Recover())
	config.Setup(e)
	err := e.Start(":" + getPort())
	if err != nil {
		panic(err)
	}
}
[ "\"PORT\"" ]
[]
[ "PORT" ]
[]
["PORT"]
go
1
0
roles/lib_openshift/library/oc_scale.py
#!/usr/bin/env python # pylint: disable=missing-docstring # flake8: noqa: T001 # ___ ___ _ _ ___ ___ _ _____ ___ ___ # / __| __| \| | __| _ \ /_\_ _| __| \ # | (_ | _|| .` | _|| / / _ \| | | _|| |) | # \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____ # | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _| # | |) | (_) | | .` | (_) || | | _|| |) | | | | # |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_| # # Copyright 2016 Red Hat, Inc. and/or its affiliates # and other contributors as indicated by the @author tags. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*- ''' OpenShiftCLI class that wraps the oc commands in a subprocess ''' # pylint: disable=too-many-lines from __future__ import print_function import atexit import copy import fcntl import json import time import os import re import shutil import subprocess import tempfile # pylint: disable=import-error try: import ruamel.yaml as yaml except ImportError: import yaml from ansible.module_utils.basic import AnsibleModule # -*- -*- -*- End included fragment: lib/import.py -*- -*- -*- # -*- -*- -*- Begin included fragment: doc/scale -*- -*- -*- DOCUMENTATION = ''' --- module: oc_scale short_description: Manage openshift services through the scale parameters description: - Manage openshift services through scaling them. options: state: description: - State represents whether to scale or list the current replicas required: true default: present choices: ["present", "list"] aliases: [] kubeconfig: description: - The path for the kubeconfig file to use for authentication required: false default: /etc/origin/master/admin.kubeconfig aliases: [] debug: description: - Turn on debug output. required: false default: False aliases: [] name: description: - Name of the object that is being queried. required: false default: None aliases: [] namespace: description: - The namespace where the object lives. required: false default: default aliases: [] kind: description: - The kind of object to scale. 
required: false default: None choices: - rc - dc aliases: [] author: - "Kenny Woodson <[email protected]>" extends_documentation_fragment: [] ''' EXAMPLES = ''' - name: scale down a rc to 0 oc_scale: name: my-replication-controller kind: rc namespace: openshift-infra replicas: 0 - name: scale up a deploymentconfig to 2 oc_scale: name: php kind: dc namespace: my-php-app replicas: 2 ''' # -*- -*- -*- End included fragment: doc/scale -*- -*- -*- # -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- class YeditException(Exception): # pragma: no cover ''' Exception class for Yedit ''' pass # pylint: disable=too-many-public-methods,too-many-instance-attributes class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)" com_sep = set(['.', '#', '|', ':']) # pylint: disable=too-many-arguments def __init__(self, filename=None, content=None, content_type='yaml', separator='.', backup_ext=None, backup=False): self.content = content self._separator = separator self.filename = filename self.__yaml_dict = content self.content_type = content_type self.backup = backup if backup_ext is None: self.backup_ext = ".{}".format(time.strftime("%Y%m%dT%H%M%S")) else: self.backup_ext = backup_ext self.load(content_type=self.content_type) if self.__yaml_dict is None: self.__yaml_dict = {} @property def separator(self): ''' getter method for separator ''' return self._separator @separator.setter def separator(self, inc_sep): ''' setter method for separator ''' self._separator = inc_sep @property def yaml_dict(self): ''' getter method for yaml_dict ''' return self.__yaml_dict @yaml_dict.setter def yaml_dict(self, value): ''' setter method for yaml_dict ''' self.__yaml_dict = value @staticmethod def parse_key(key, sep='.'): '''parse the key allowing the appropriate separator''' common_separators = list(Yedit.com_sep - set([sep])) return re.findall(Yedit.re_key.format(''.join(common_separators)), key) @staticmethod def valid_key(key, sep='.'): '''validate the incoming key''' common_separators = list(Yedit.com_sep - set([sep])) if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key): return False return True # pylint: disable=too-many-return-statements,too-many-branches @staticmethod def remove_entry(data, key, index=None, value=None, sep='.'): ''' remove data at location key ''' if key == '' and isinstance(data, dict): if value is not None: data.pop(value) elif index is not None: raise YeditException("remove_entry for a dictionary does not have an index {}".format(index)) else: data.clear() return True elif key == '' and isinstance(data, list): ind = None if value is not None: try: ind = data.index(value) except ValueError: return False elif index is not None: ind = index else: del data[:] if ind is not None: data.pop(ind) return True if not (key and Yedit.valid_key(key, sep)) and \ isinstance(data, (list, dict)): return None key_indexes = Yedit.parse_key(key, sep) for arr_ind, dict_key in key_indexes[:-1]: if dict_key and isinstance(data, dict): data = data.get(dict_key) elif (arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1): data = data[int(arr_ind)] else: return None # process last index for remove # expected list entry if key_indexes[-1][0]: if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501 del data[int(key_indexes[-1][0])] return True # expected dict entry elif key_indexes[-1][1]: if 
isinstance(data, dict): del data[key_indexes[-1][1]] return True @staticmethod def add_entry(data, key, item=None, sep='.'): ''' Get an item from a dictionary with key notation a.b.c d = {'a': {'b': 'c'}}} key = a#b return c ''' if key == '': pass elif (not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict))): return None key_indexes = Yedit.parse_key(key, sep) for arr_ind, dict_key in key_indexes[:-1]: if dict_key: if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501 data = data[dict_key] continue elif data and not isinstance(data, dict): raise YeditException("Unexpected item type found while going through key " + "path: {} (at key: {})".format(key, dict_key)) data[dict_key] = {} data = data[dict_key] elif (arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1): data = data[int(arr_ind)] else: raise YeditException("Unexpected item type found while going through key path: {}".format(key)) if key == '': data = item # process last index for add # expected list entry elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501 data[int(key_indexes[-1][0])] = item # expected dict entry elif key_indexes[-1][1] and isinstance(data, dict): data[key_indexes[-1][1]] = item # didn't add/update to an existing list, nor add/update key to a dict # so we must have been provided some syntax like a.b.c[<int>] = "data" for a # non-existent array else: raise YeditException("Error adding to object at path: {}".format(key)) return data @staticmethod def get_entry(data, key, sep='.'): ''' Get an item from a dictionary with key notation a.b.c d = {'a': {'b': 'c'}}} key = a.b return c ''' if key == '': pass elif (not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict))): return None key_indexes = Yedit.parse_key(key, sep) for arr_ind, dict_key in key_indexes: if dict_key and isinstance(data, dict): data = data.get(dict_key) elif (arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1): data = data[int(arr_ind)] else: return None return data @staticmethod def _write(filename, contents): ''' Actually write the file contents to disk. This helps with mocking. ''' tmp_filename = filename + '.yedit' with open(tmp_filename, 'w') as yfd: fcntl.flock(yfd, fcntl.LOCK_EX | fcntl.LOCK_NB) yfd.write(contents) fcntl.flock(yfd, fcntl.LOCK_UN) os.rename(tmp_filename, filename) def write(self): ''' write to file ''' if not self.filename: raise YeditException('Please specify a filename.') if self.backup and self.file_exists(): shutil.copy(self.filename, '{}{}'.format(self.filename, self.backup_ext)) # Try to set format attributes if supported try: self.yaml_dict.fa.set_block_style() except AttributeError: pass # Try to use RoundTripDumper if supported. 
if self.content_type == 'yaml': try: Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper)) except AttributeError: Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False)) elif self.content_type == 'json': Yedit._write(self.filename, json.dumps(self.yaml_dict, indent=4, sort_keys=True)) else: raise YeditException('Unsupported content_type: {}.'.format(self.content_type) + 'Please specify a content_type of yaml or json.') return (True, self.yaml_dict) def read(self): ''' read from file ''' # check if it exists if self.filename is None or not self.file_exists(): return None contents = None with open(self.filename) as yfd: contents = yfd.read() return contents def file_exists(self): ''' return whether file exists ''' if os.path.exists(self.filename): return True return False def load(self, content_type='yaml'): ''' return yaml file ''' contents = self.read() if not contents and not self.content: return None if self.content: if isinstance(self.content, dict): self.yaml_dict = self.content return self.yaml_dict elif isinstance(self.content, str): contents = self.content # check if it is yaml try: if content_type == 'yaml' and contents: # Try to set format attributes if supported try: self.yaml_dict.fa.set_block_style() except AttributeError: pass # Try to use RoundTripLoader if supported. try: self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader) except AttributeError: self.yaml_dict = yaml.safe_load(contents) # Try to set format attributes if supported try: self.yaml_dict.fa.set_block_style() except AttributeError: pass elif content_type == 'json' and contents: self.yaml_dict = json.loads(contents) except yaml.YAMLError as err: # Error loading yaml or json raise YeditException('Problem with loading yaml file. 
{}'.format(err)) return self.yaml_dict def get(self, key): ''' get a specified key''' try: entry = Yedit.get_entry(self.yaml_dict, key, self.separator) except KeyError: entry = None return entry def pop(self, path, key_or_item): ''' remove a key, value pair from a dict or an item for a list''' try: entry = Yedit.get_entry(self.yaml_dict, path, self.separator) except KeyError: entry = None if entry is None: return (False, self.yaml_dict) if isinstance(entry, dict): # AUDIT:maybe-no-member makes sense due to fuzzy types # pylint: disable=maybe-no-member if key_or_item in entry: entry.pop(key_or_item) return (True, self.yaml_dict) return (False, self.yaml_dict) elif isinstance(entry, list): # AUDIT:maybe-no-member makes sense due to fuzzy types # pylint: disable=maybe-no-member ind = None try: ind = entry.index(key_or_item) except ValueError: return (False, self.yaml_dict) entry.pop(ind) return (True, self.yaml_dict) return (False, self.yaml_dict) def delete(self, path, index=None, value=None): ''' remove path from a dict''' try: entry = Yedit.get_entry(self.yaml_dict, path, self.separator) except KeyError: entry = None if entry is None: return (False, self.yaml_dict) result = Yedit.remove_entry(self.yaml_dict, path, index, value, self.separator) if not result: return (False, self.yaml_dict) return (True, self.yaml_dict) def exists(self, path, value): ''' check if value exists at path''' try: entry = Yedit.get_entry(self.yaml_dict, path, self.separator) except KeyError: entry = None if isinstance(entry, list): if value in entry: return True return False elif isinstance(entry, dict): if isinstance(value, dict): rval = False for key, val in value.items(): if entry[key] != val: rval = False break else: rval = True return rval return value in entry return entry == value def append(self, path, value): '''append value to a list''' try: entry = Yedit.get_entry(self.yaml_dict, path, self.separator) except KeyError: entry = None if entry is None: self.put(path, []) entry = Yedit.get_entry(self.yaml_dict, path, self.separator) if not isinstance(entry, list): return (False, self.yaml_dict) # AUDIT:maybe-no-member makes sense due to loading data from # a serialized format. # pylint: disable=maybe-no-member entry.append(value) return (True, self.yaml_dict) # pylint: disable=too-many-arguments def update(self, path, value, index=None, curr_value=None): ''' put path, value into a dict ''' try: entry = Yedit.get_entry(self.yaml_dict, path, self.separator) except KeyError: entry = None if isinstance(entry, dict): # AUDIT:maybe-no-member makes sense due to fuzzy types # pylint: disable=maybe-no-member if not isinstance(value, dict): raise YeditException('Cannot replace key, value entry in dict with non-dict type. 
' + 'value=[{}] type=[{}]'.format(value, type(value))) entry.update(value) return (True, self.yaml_dict) elif isinstance(entry, list): # AUDIT:maybe-no-member makes sense due to fuzzy types # pylint: disable=maybe-no-member ind = None if curr_value: try: ind = entry.index(curr_value) except ValueError: return (False, self.yaml_dict) elif index is not None: ind = index if ind is not None and entry[ind] != value: entry[ind] = value return (True, self.yaml_dict) # see if it exists in the list try: ind = entry.index(value) except ValueError: # doesn't exist, append it entry.append(value) return (True, self.yaml_dict) # already exists, return if ind is not None: return (False, self.yaml_dict) return (False, self.yaml_dict) def put(self, path, value): ''' put path, value into a dict ''' try: entry = Yedit.get_entry(self.yaml_dict, path, self.separator) except KeyError: entry = None if entry == value: return (False, self.yaml_dict) # deepcopy didn't work # Try to use ruamel.yaml and fallback to pyyaml try: tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader) except AttributeError: tmp_copy = copy.deepcopy(self.yaml_dict) # set the format attributes if available try: tmp_copy.fa.set_block_style() except AttributeError: pass result = Yedit.add_entry(tmp_copy, path, value, self.separator) if result is None: return (False, self.yaml_dict) # When path equals "" it is a special case. # "" refers to the root of the document # Only update the root path (entire document) when its a list or dict if path == '': if isinstance(result, list) or isinstance(result, dict): self.yaml_dict = result return (True, self.yaml_dict) return (False, self.yaml_dict) self.yaml_dict = tmp_copy return (True, self.yaml_dict) def create(self, path, value): ''' create a yaml file ''' if not self.file_exists(): # deepcopy didn't work # Try to use ruamel.yaml and fallback to pyyaml try: tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader) except AttributeError: tmp_copy = copy.deepcopy(self.yaml_dict) # set the format attributes if available try: tmp_copy.fa.set_block_style() except AttributeError: pass result = Yedit.add_entry(tmp_copy, path, value, self.separator) if result is not None: self.yaml_dict = tmp_copy return (True, self.yaml_dict) return (False, self.yaml_dict) @staticmethod def get_curr_value(invalue, val_type): '''return the current value''' if invalue is None: return None curr_value = invalue if val_type == 'yaml': curr_value = yaml.safe_load(str(invalue)) elif val_type == 'json': curr_value = json.loads(invalue) return curr_value @staticmethod def parse_value(inc_value, vtype=''): '''determine value type passed''' true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE', 'on', 'On', 'ON', ] false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE', 'off', 'Off', 'OFF'] # It came in as a string but you didn't specify value_type as string # we will convert to bool if it matches any of the above cases if isinstance(inc_value, str) and 'bool' in vtype: if inc_value not in true_bools and inc_value not in false_bools: raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype)) elif isinstance(inc_value, bool) and 'str' in vtype: inc_value = str(inc_value) # There is a special case where '' will turn into None after yaml loading it so skip if isinstance(inc_value, str) and inc_value == '': pass # If vtype is not str then go ahead and attempt to yaml load it. 
elif isinstance(inc_value, str) and 'str' not in vtype: try: inc_value = yaml.safe_load(inc_value) except Exception: raise YeditException('Could not determine type of incoming value. ' + 'value=[{}] vtype=[{}]'.format(type(inc_value), vtype)) return inc_value @staticmethod def process_edits(edits, yamlfile): '''run through a list of edits and process them one-by-one''' results = [] for edit in edits: value = Yedit.parse_value(edit['value'], edit.get('value_type', '')) if edit.get('action') == 'update': # pylint: disable=line-too-long curr_value = Yedit.get_curr_value( Yedit.parse_value(edit.get('curr_value')), edit.get('curr_value_format')) rval = yamlfile.update(edit['key'], value, edit.get('index'), curr_value) elif edit.get('action') == 'append': rval = yamlfile.append(edit['key'], value) else: rval = yamlfile.put(edit['key'], value) if rval[0]: results.append({'key': edit['key'], 'edit': rval[1]}) return {'changed': len(results) > 0, 'results': results} # pylint: disable=too-many-return-statements,too-many-branches @staticmethod def run_ansible(params): '''perform the idempotent crud operations''' yamlfile = Yedit(filename=params['src'], backup=params['backup'], content_type=params['content_type'], backup_ext=params['backup_ext'], separator=params['separator']) state = params['state'] if params['src']: rval = yamlfile.load() if yamlfile.yaml_dict is None and state != 'present': return {'failed': True, 'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) + 'file exists, that it is has correct permissions, and is valid yaml.'} if state == 'list': if params['content']: content = Yedit.parse_value(params['content'], params['content_type']) yamlfile.yaml_dict = content if params['key']: rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} elif state == 'absent': if params['content']: content = Yedit.parse_value(params['content'], params['content_type']) yamlfile.yaml_dict = content if params['update']: rval = yamlfile.pop(params['key'], params['value']) else: rval = yamlfile.delete(params['key'], params['index'], params['value']) if rval[0] and params['src']: yamlfile.write() return {'changed': rval[0], 'result': rval[1], 'state': state} elif state == 'present': # check if content is different than what is in the file if params['content']: content = Yedit.parse_value(params['content'], params['content_type']) # We had no edits to make and the contents are the same if yamlfile.yaml_dict == content and \ params['value'] is None: return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state} yamlfile.yaml_dict = content # If we were passed a key, value then # we enapsulate it in a list and process it # Key, Value passed to the module : Converted to Edits list # edits = [] _edit = {} if params['value'] is not None: _edit['value'] = params['value'] _edit['value_type'] = params['value_type'] _edit['key'] = params['key'] if params['update']: _edit['action'] = 'update' _edit['curr_value'] = params['curr_value'] _edit['curr_value_format'] = params['curr_value_format'] _edit['index'] = params['index'] elif params['append']: _edit['action'] = 'append' edits.append(_edit) elif params['edits'] is not None: edits = params['edits'] if edits: results = Yedit.process_edits(edits, yamlfile) # if there were changes and a src provided to us we need to write if results['changed'] and params['src']: yamlfile.write() return {'changed': results['changed'], 'result': results['results'], 'state': state} # no edits to make if params['src']: # 
pylint: disable=redefined-variable-type rval = yamlfile.write() return {'changed': rval[0], 'result': rval[1], 'state': state} # We were passed content but no src, key or value, or edits. Return contents in memory return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state} return {'failed': True, 'msg': 'Unkown state passed'} # -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- # -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*- # pylint: disable=too-many-lines # noqa: E301,E302,E303,T001 class OpenShiftCLIError(Exception): '''Exception class for openshiftcli''' pass ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')] def locate_oc_binary(): ''' Find and return oc binary file ''' # https://github.com/openshift/openshift-ansible/issues/3410 # oc can be in /usr/local/bin in some cases, but that may not # be in $PATH due to ansible/sudo paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS oc_binary = 'oc' # Use shutil.which if it is available, otherwise fallback to a naive path search try: which_result = shutil.which(oc_binary, path=os.pathsep.join(paths)) if which_result is not None: oc_binary = which_result except AttributeError: for path in paths: if os.path.exists(os.path.join(path, oc_binary)): oc_binary = os.path.join(path, oc_binary) break return oc_binary # pylint: disable=too-few-public-methods class OpenShiftCLI(object): ''' Class to wrap the command line tools ''' def __init__(self, namespace, kubeconfig='/etc/origin/master/admin.kubeconfig', verbose=False, all_namespaces=False): ''' Constructor for OpenshiftCLI ''' self.namespace = namespace self.verbose = verbose self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig) self.all_namespaces = all_namespaces self.oc_binary = locate_oc_binary() # Pylint allows only 5 arguments to be passed. 
# pylint: disable=too-many-arguments def _replace_content(self, resource, rname, content, edits=None, force=False, sep='.'): ''' replace the current object with the content ''' res = self._get(resource, rname) if not res['results']: return res fname = Utils.create_tmpfile(rname + '-') yed = Yedit(fname, res['results'][0], separator=sep) updated = False if content is not None: changes = [] for key, value in content.items(): changes.append(yed.put(key, value)) if any([change[0] for change in changes]): updated = True elif edits is not None: results = Yedit.process_edits(edits, yed) if results['changed']: updated = True if updated: yed.write() atexit.register(Utils.cleanup, [fname]) return self._replace(fname, force) return {'returncode': 0, 'updated': False} def _replace(self, fname, force=False): '''replace the current object with oc replace''' # We are removing the 'resourceVersion' to handle # a race condition when modifying oc objects yed = Yedit(fname) results = yed.delete('metadata.resourceVersion') if results[0]: yed.write() cmd = ['replace', '-f', fname] if force: cmd.append('--force') return self.openshift_cmd(cmd) def _create_from_content(self, rname, content): '''create a temporary file and then call oc create on it''' fname = Utils.create_tmpfile(rname + '-') yed = Yedit(fname, content=content) yed.write() atexit.register(Utils.cleanup, [fname]) return self._create(fname) def _create(self, fname): '''call oc create on a filename''' return self.openshift_cmd(['create', '-f', fname]) def _delete(self, resource, name=None, selector=None): '''call oc delete on a resource''' cmd = ['delete', resource] if selector is not None: cmd.append('--selector={}'.format(selector)) elif name is not None: cmd.append(name) else: raise OpenShiftCLIError('Either name or selector is required when calling delete.') return self.openshift_cmd(cmd) def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501 '''process a template template_name: the name of the template to process create: whether to send to oc create after processing params: the parameters for the template template_data: the incoming template's data; instead of a file ''' cmd = ['process'] if template_data: cmd.extend(['-f', '-']) else: cmd.append(template_name) if params: param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()] cmd.append('-p') cmd.extend(param_str) results = self.openshift_cmd(cmd, output=True, input_data=template_data) if results['returncode'] != 0 or not create: return results fname = Utils.create_tmpfile(template_name + '-') yed = Yedit(fname, results['results']) yed.write() atexit.register(Utils.cleanup, [fname]) return self.openshift_cmd(['create', '-f', fname]) def _get(self, resource, name=None, selector=None, field_selector=None): '''return a resource by name ''' cmd = ['get', resource] if selector is not None: cmd.append('--selector={}'.format(selector)) if field_selector is not None: cmd.append('--field-selector={}'.format(field_selector)) # Name cannot be used with selector or field_selector. 
if selector is None and field_selector is None and name is not None: cmd.append(name) cmd.extend(['-o', 'json']) rval = self.openshift_cmd(cmd, output=True) # Ensure results are retuned in an array if 'items' in rval: rval['results'] = rval['items'] elif not isinstance(rval['results'], list): rval['results'] = [rval['results']] return rval def _schedulable(self, node=None, selector=None, schedulable=True): ''' perform oadm manage-node scheduable ''' cmd = ['manage-node'] if node: cmd.extend(node) else: cmd.append('--selector={}'.format(selector)) cmd.append('--schedulable={}'.format(schedulable)) return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501 def _list_pods(self, node=None, selector=None, pod_selector=None): ''' perform oadm list pods node: the node in which to list pods selector: the label selector filter if provided pod_selector: the pod selector filter if provided ''' cmd = ['manage-node'] if node: cmd.extend(node) else: cmd.append('--selector={}'.format(selector)) if pod_selector: cmd.append('--pod-selector={}'.format(pod_selector)) cmd.extend(['--list-pods', '-o', 'json']) return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # pylint: disable=too-many-arguments def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False): ''' perform oadm manage-node evacuate ''' cmd = ['manage-node'] if node: cmd.extend(node) else: cmd.append('--selector={}'.format(selector)) if dry_run: cmd.append('--dry-run') if pod_selector: cmd.append('--pod-selector={}'.format(pod_selector)) if grace_period: cmd.append('--grace-period={}'.format(int(grace_period))) if force: cmd.append('--force') cmd.append('--evacuate') return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') def _version(self): ''' return the openshift version''' return self.openshift_cmd(['version'], output=True, output_type='raw') def _import_image(self, url=None, name=None, tag=None): ''' perform image import ''' cmd = ['import-image'] image = '{0}'.format(name) if tag: image += ':{0}'.format(tag) cmd.append(image) if url: cmd.append('--from={0}/{1}'.format(url, image)) cmd.append('-n{0}'.format(self.namespace)) cmd.append('--confirm') return self.openshift_cmd(cmd) def _run(self, cmds, input_data): ''' Actually executes the command. This makes mocking easier. 
''' curr_env = os.environ.copy() curr_env.update({'KUBECONFIG': self.kubeconfig}) proc = subprocess.Popen(cmds, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=curr_env) stdout, stderr = proc.communicate(input_data) return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): '''Base command for oc ''' cmds = [self.oc_binary] if oadm: cmds.append('adm') cmds.extend(cmd) if self.all_namespaces: cmds.extend(['--all-namespaces']) elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501 cmds.extend(['-n', self.namespace]) if self.verbose: print(' '.join(cmds)) try: returncode, stdout, stderr = self._run(cmds, input_data) except OSError as ex: returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex) rval = {"returncode": returncode, "cmd": ' '.join(cmds)} if output_type == 'json': rval['results'] = {} if output and stdout: try: rval['results'] = json.loads(stdout) except ValueError as verr: if "No JSON object could be decoded" in verr.args: rval['err'] = verr.args elif output_type == 'raw': rval['results'] = stdout if output else '' if self.verbose: print("STDOUT: {0}".format(stdout)) print("STDERR: {0}".format(stderr)) if 'err' in rval or returncode != 0: rval.update({"stderr": stderr, "stdout": stdout}) return rval class Utils(object): # pragma: no cover ''' utilities for openshiftcli modules ''' @staticmethod def _write(filename, contents): ''' Actually write the file contents to disk. This helps with mocking. ''' with open(filename, 'w') as sfd: sfd.write(str(contents)) @staticmethod def create_tmp_file_from_contents(rname, data, ftype='yaml'): ''' create a file in tmp with name and contents''' tmp = Utils.create_tmpfile(prefix=rname) if ftype == 'yaml': # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage # pylint: disable=no-member if hasattr(yaml, 'RoundTripDumper'): Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper)) else: Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False)) elif ftype == 'json': Utils._write(tmp, json.dumps(data)) else: Utils._write(tmp, data) # Register cleanup when module is done atexit.register(Utils.cleanup, [tmp]) return tmp @staticmethod def create_tmpfile_copy(inc_file): '''create a temporary copy of a file''' tmpfile = Utils.create_tmpfile('lib_openshift-') Utils._write(tmpfile, open(inc_file).read()) # Cleanup the tmpfile atexit.register(Utils.cleanup, [tmpfile]) return tmpfile @staticmethod def create_tmpfile(prefix='tmp'): ''' Generates and returns a temporary file name ''' with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp: return tmp.name @staticmethod def create_tmp_files_from_contents(content, content_type=None): '''Turn an array of dict: filename, content into a files array''' if not isinstance(content, list): content = [content] files = [] for item in content: path = Utils.create_tmp_file_from_contents(item['path'] + '-', item['data'], ftype=content_type) files.append({'name': os.path.basename(item['path']), 'path': path}) return files @staticmethod def cleanup(files): '''Clean up on exit ''' for sfile in files: if os.path.exists(sfile): if os.path.isdir(sfile): shutil.rmtree(sfile) elif os.path.isfile(sfile): os.remove(sfile) @staticmethod def exists(results, _name): ''' Check to see if the results include the name ''' if not 
results: return False if Utils.find_result(results, _name): return True return False @staticmethod def find_result(results, _name): ''' Find the specified result by name''' rval = None for result in results: if 'metadata' in result and result['metadata']['name'] == _name: rval = result break return rval @staticmethod def get_resource_file(sfile, sfile_type='yaml'): ''' return the service file ''' contents = None with open(sfile) as sfd: contents = sfd.read() if sfile_type == 'yaml': # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage # pylint: disable=no-member if hasattr(yaml, 'RoundTripLoader'): contents = yaml.load(contents, yaml.RoundTripLoader) else: contents = yaml.safe_load(contents) elif sfile_type == 'json': contents = json.loads(contents) return contents @staticmethod def filter_versions(stdout): ''' filter the oc version output ''' version_dict = {} version_search = ['oc', 'openshift', 'kubernetes'] for line in stdout.strip().split('\n'): for term in version_search: if not line: continue if line.startswith(term): version_dict[term] = line.split()[-1] # horrible hack to get openshift version in Openshift 3.2 # By default "oc version in 3.2 does not return an "openshift" version if "openshift" not in version_dict: version_dict["openshift"] = version_dict["oc"] return version_dict @staticmethod def add_custom_versions(versions): ''' create custom versions strings ''' versions_dict = {} for tech, version in versions.items(): # clean up "-" from version if "-" in version: version = version.split("-")[0] if version.startswith('v'): version = version[1:] # Remove the 'v' prefix versions_dict[tech + '_numeric'] = version.split('+')[0] # "3.3.0.33" is what we have, we want "3.3" versions_dict[tech + '_short'] = "{}.{}".format(*version.split('.')) return versions_dict @staticmethod def openshift_installed(): ''' check if openshift is installed ''' import rpm transaction_set = rpm.TransactionSet() rpmquery = transaction_set.dbMatch("name", "atomic-openshift") return rpmquery.count() > 0 # Disabling too-many-branches. This is a yaml dictionary comparison function # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements @staticmethod def check_def_equal(user_def, result_def, skip_keys=None, debug=False): ''' Given a user defined definition, compare it with the results given back by our query. 
''' # Currently these values are autogenerated and we do not need to check them skip = ['metadata', 'status'] if skip_keys: skip.extend(skip_keys) for key, value in result_def.items(): if key in skip: continue # Both are lists if isinstance(value, list): if key not in user_def: if debug: print('User data does not have key [%s]' % key) print('User data: %s' % user_def) return False if not isinstance(user_def[key], list): if debug: print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key])) return False if len(user_def[key]) != len(value): if debug: print("List lengths are not equal.") print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value))) print("user_def: %s" % user_def[key]) print("value: %s" % value) return False for values in zip(user_def[key], value): if isinstance(values[0], dict) and isinstance(values[1], dict): if debug: print('sending list - list') print(type(values[0])) print(type(values[1])) result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug) if not result: print('list compare returned false') return False elif value != user_def[key]: if debug: print('value should be identical') print(user_def[key]) print(value) return False # recurse on a dictionary elif isinstance(value, dict): if key not in user_def: if debug: print("user_def does not have key [%s]" % key) return False if not isinstance(user_def[key], dict): if debug: print("dict returned false: not instance of dict") return False # before passing ensure keys match api_values = set(value.keys()) - set(skip) user_values = set(user_def[key].keys()) - set(skip) if api_values != user_values: if debug: print("keys are not equal in dict") print(user_values) print(api_values) return False result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug) if not result: if debug: print("dict returned false") print(result) return False # Verify each key, value pair is the same else: if key not in user_def or value != user_def[key]: if debug: print("value not equal; user_def does not have key") print(key) print(value) if key in user_def: print(user_def[key]) return False if debug: print('returning true') return True class OpenShiftCLIConfig(object): '''Generic Config''' def __init__(self, rname, namespace, kubeconfig, options): self.kubeconfig = kubeconfig self.name = rname self.namespace = namespace self._options = options @property def config_options(self): ''' return config options ''' return self._options def to_option_list(self, ascommalist=''): '''return all options as a string if ascommalist is set to the name of a key, and the value of that key is a dict, format the dict as a list of comma delimited key=value pairs''' return self.stringify(ascommalist) def stringify(self, ascommalist=''): ''' return the options hash as cli params in a string if ascommalist is set to the name of a key, and the value of that key is a dict, format the dict as a list of comma delimited key=value pairs ''' rval = [] for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: val = data['value'] rval.append('--{}={}'.format(key.replace('_', '-'), val)) return rval # -*- -*- -*- End included fragment: lib/base.py -*- -*- -*- # -*- -*- -*- Begin included fragment: lib/deploymentconfig.py -*- -*- -*- # pylint: 
disable=too-many-public-methods class DeploymentConfig(Yedit): ''' Class to model an openshift DeploymentConfig''' default_deployment_config = ''' apiVersion: v1 kind: DeploymentConfig metadata: name: default_dc namespace: default spec: replicas: 0 selector: default_dc: default_dc strategy: resources: {} rollingParams: intervalSeconds: 1 maxSurge: 0 maxUnavailable: 25% timeoutSeconds: 600 updatePercent: -25 updatePeriodSeconds: 1 type: Rolling template: metadata: spec: containers: - env: - name: default value: default image: default imagePullPolicy: IfNotPresent name: default_dc ports: - containerPort: 8000 hostPort: 8000 protocol: TCP name: default_port resources: {} terminationMessagePath: /dev/termination-log dnsPolicy: ClusterFirst hostNetwork: true nodeSelector: type: compute restartPolicy: Always securityContext: {} serviceAccount: default serviceAccountName: default terminationGracePeriodSeconds: 30 triggers: - type: ConfigChange ''' replicas_path = "spec.replicas" env_path = "spec.template.spec.containers[0].env" volumes_path = "spec.template.spec.volumes" container_path = "spec.template.spec.containers" volume_mounts_path = "spec.template.spec.containers[0].volumeMounts" def __init__(self, content=None): ''' Constructor for deploymentconfig ''' if not content: content = DeploymentConfig.default_deployment_config super(DeploymentConfig, self).__init__(content=content) def add_env_value(self, key, value): ''' add key, value pair to env array ''' rval = False env = self.get_env_vars() if env: env.append({'name': key, 'value': value}) rval = True else: result = self.put(DeploymentConfig.env_path, {'name': key, 'value': value}) rval = result[0] return rval def exists_env_value(self, key, value): ''' return whether a key, value pair exists ''' results = self.get_env_vars() if not results: return False for result in results: if result['name'] == key: if 'value' not in result: if value == "" or value is None: return True elif result['value'] == value: return True return False def exists_env_key(self, key): ''' return whether a key, value pair exists ''' results = self.get_env_vars() if not results: return False for result in results: if result['name'] == key: return True return False def get_env_var(self, key): '''return a environment variables ''' results = self.get(DeploymentConfig.env_path) or [] if not results: return None for env_var in results: if env_var['name'] == key: return env_var return None def get_env_vars(self): '''return a environment variables ''' return self.get(DeploymentConfig.env_path) or [] def delete_env_var(self, keys): '''delete a list of keys ''' if not isinstance(keys, list): keys = [keys] env_vars_array = self.get_env_vars() modified = False idx = None for key in keys: for env_idx, env_var in enumerate(env_vars_array): if env_var['name'] == key: idx = env_idx break if idx: modified = True del env_vars_array[idx] if modified: return True return False def update_env_var(self, key, value): '''place an env in the env var list''' env_vars_array = self.get_env_vars() idx = None for env_idx, env_var in enumerate(env_vars_array): if env_var['name'] == key: idx = env_idx break if idx: env_vars_array[idx]['value'] = value else: self.add_env_value(key, value) return True def exists_volume_mount(self, volume_mount): ''' return whether a volume mount exists ''' exist_volume_mounts = self.get_volume_mounts() if not exist_volume_mounts: return False volume_mount_found = False for exist_volume_mount in exist_volume_mounts: if exist_volume_mount['name'] == volume_mount['name']: 
volume_mount_found = True break return volume_mount_found def exists_volume(self, volume): ''' return whether a volume exists ''' exist_volumes = self.get_volumes() volume_found = False for exist_volume in exist_volumes: if exist_volume['name'] == volume['name']: volume_found = True break return volume_found def find_volume_by_name(self, volume, mounts=False): ''' return the index of a volume ''' volumes = [] if mounts: volumes = self.get_volume_mounts() else: volumes = self.get_volumes() for exist_volume in volumes: if exist_volume['name'] == volume['name']: return exist_volume return None def get_replicas(self): ''' return replicas setting ''' return self.get(DeploymentConfig.replicas_path) def get_volume_mounts(self): '''return volume mount information ''' return self.get_volumes(mounts=True) def get_volumes(self, mounts=False): '''return volume mount information ''' if mounts: return self.get(DeploymentConfig.volume_mounts_path) or [] return self.get(DeploymentConfig.volumes_path) or [] def delete_volume_by_name(self, volume): '''delete a volume ''' modified = False exist_volume_mounts = self.get_volume_mounts() exist_volumes = self.get_volumes() del_idx = None for idx, exist_volume in enumerate(exist_volumes): if 'name' in exist_volume and exist_volume['name'] == volume['name']: del_idx = idx break if del_idx != None: del exist_volumes[del_idx] modified = True del_idx = None for idx, exist_volume_mount in enumerate(exist_volume_mounts): if 'name' in exist_volume_mount and exist_volume_mount['name'] == volume['name']: del_idx = idx break if del_idx != None: del exist_volume_mounts[idx] modified = True return modified def add_volume_mount(self, volume_mount): ''' add a volume or volume mount to the proper location ''' exist_volume_mounts = self.get_volume_mounts() if not exist_volume_mounts and volume_mount: self.put(DeploymentConfig.volume_mounts_path, [volume_mount]) else: exist_volume_mounts.append(volume_mount) def add_volume(self, volume): ''' add a volume or volume mount to the proper location ''' exist_volumes = self.get_volumes() if not volume: return if not exist_volumes: self.put(DeploymentConfig.volumes_path, [volume]) else: exist_volumes.append(volume) def update_replicas(self, replicas): ''' update replicas value ''' self.put(DeploymentConfig.replicas_path, replicas) def update_volume(self, volume): '''place an env in the env var list''' exist_volumes = self.get_volumes() if not volume: return False # update the volume update_idx = None for idx, exist_vol in enumerate(exist_volumes): if exist_vol['name'] == volume['name']: update_idx = idx break if update_idx != None: exist_volumes[update_idx] = volume else: self.add_volume(volume) return True def update_volume_mount(self, volume_mount): '''place an env in the env var list''' modified = False exist_volume_mounts = self.get_volume_mounts() if not volume_mount: return False # update the volume mount for exist_vol_mount in exist_volume_mounts: if exist_vol_mount['name'] == volume_mount['name']: if 'mountPath' in exist_vol_mount and \ str(exist_vol_mount['mountPath']) != str(volume_mount['mountPath']): exist_vol_mount['mountPath'] = volume_mount['mountPath'] modified = True break if not modified: self.add_volume_mount(volume_mount) modified = True return modified def needs_update_volume(self, volume, volume_mount): ''' verify a volume update is needed ''' exist_volume = self.find_volume_by_name(volume) exist_volume_mount = self.find_volume_by_name(volume, mounts=True) results = [] results.append(exist_volume['name'] == 
volume['name']) if 'secret' in volume: results.append('secret' in exist_volume) results.append(exist_volume['secret']['secretName'] == volume['secret']['secretName']) results.append(exist_volume_mount['name'] == volume_mount['name']) results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath']) elif 'emptyDir' in volume: results.append(exist_volume_mount['name'] == volume['name']) results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath']) elif 'persistentVolumeClaim' in volume: pvc = 'persistentVolumeClaim' results.append(pvc in exist_volume) if results[-1]: results.append(exist_volume[pvc]['claimName'] == volume[pvc]['claimName']) if 'claimSize' in volume[pvc]: results.append(exist_volume[pvc]['claimSize'] == volume[pvc]['claimSize']) elif 'hostpath' in volume: results.append('hostPath' in exist_volume) results.append(exist_volume['hostPath']['path'] == volume_mount['mountPath']) return not all(results) def needs_update_replicas(self, replicas): ''' verify whether a replica update is needed ''' current_reps = self.get(DeploymentConfig.replicas_path) return not current_reps == replicas # -*- -*- -*- End included fragment: lib/deploymentconfig.py -*- -*- -*- # -*- -*- -*- Begin included fragment: lib/replicationcontroller.py -*- -*- -*- # pylint: disable=too-many-public-methods class ReplicationController(DeploymentConfig): ''' Class to model a replicationcontroller openshift object. Currently we are modeled after a deployment config since they are very similar. In the future, when the need arises we will add functionality to this class. ''' replicas_path = "spec.replicas" env_path = "spec.template.spec.containers[0].env" volumes_path = "spec.template.spec.volumes" container_path = "spec.template.spec.containers" volume_mounts_path = "spec.template.spec.containers[0].volumeMounts" def __init__(self, content): ''' Constructor for ReplicationController ''' super(ReplicationController, self).__init__(content=content) # -*- -*- -*- End included fragment: lib/replicationcontroller.py -*- -*- -*- # -*- -*- -*- Begin included fragment: class/oc_scale.py -*- -*- -*- # pylint: disable=too-many-instance-attributes class OCScale(OpenShiftCLI): ''' Class to wrap the oc command line tools ''' # pylint allows 5 # pylint: disable=too-many-arguments def __init__(self, resource_name, namespace, replicas, kind, kubeconfig='/etc/origin/master/admin.kubeconfig', verbose=False): ''' Constructor for OCScale ''' super(OCScale, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose) self.kind = kind self.replicas = replicas self.name = resource_name self._resource = None @property def resource(self): ''' property function for resource var ''' if not self._resource: self.get() return self._resource @resource.setter def resource(self, data): ''' setter function for resource var ''' self._resource = data def get(self): '''return replicas information ''' vol = self._get(self.kind, self.name) if vol['returncode'] == 0: if self.kind == 'dc': # The resource returned from a query could be an rc or dc. # pylint: disable=redefined-variable-type self.resource = DeploymentConfig(content=vol['results'][0]) vol['results'] = [self.resource.get_replicas()] if self.kind == 'rc': # The resource returned from a query could be an rc or dc. 
# pylint: disable=redefined-variable-type self.resource = ReplicationController(content=vol['results'][0]) vol['results'] = [self.resource.get_replicas()] return vol def put(self): '''update replicas into dc ''' self.resource.update_replicas(self.replicas) return self._replace_content(self.kind, self.name, self.resource.yaml_dict) def needs_update(self): ''' verify whether an update is needed ''' return self.resource.needs_update_replicas(self.replicas) # pylint: disable=too-many-return-statements @staticmethod def run_ansible(params, check_mode): '''run the oc_scale module''' oc_scale = OCScale(params['name'], params['namespace'], params['replicas'], params['kind'], params['kubeconfig'], verbose=params['debug']) state = params['state'] api_rval = oc_scale.get() if api_rval['returncode'] != 0: return {'failed': True, 'msg': api_rval} ##### # Get ##### if state == 'list': return {'changed': False, 'result': api_rval['results'], 'state': 'list'} # noqa: E501 elif state == 'present': ######## # Update ######## if oc_scale.needs_update(): if check_mode: return {'changed': True, 'result': 'CHECK_MODE: Would have updated.'} # noqa: E501 api_rval = oc_scale.put() if api_rval['returncode'] != 0: return {'failed': True, 'msg': api_rval} # return the created object api_rval = oc_scale.get() if api_rval['returncode'] != 0: return {'failed': True, 'msg': api_rval} return {'changed': True, 'result': api_rval['results'], 'state': 'present'} # noqa: E501 return {'changed': False, 'result': api_rval['results'], 'state': 'present'} # noqa: E501 return {'failed': True, 'msg': 'Unknown state passed. [{}]'.format(state)} # -*- -*- -*- End included fragment: class/oc_scale.py -*- -*- -*- # -*- -*- -*- Begin included fragment: ansible/oc_scale.py -*- -*- -*- def main(): ''' ansible oc module for scaling ''' module = AnsibleModule( argument_spec=dict( kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'), state=dict(default='present', type='str', choices=['present', 'list']), debug=dict(default=False, type='bool'), kind=dict(default='dc', choices=['dc', 'rc'], type='str'), namespace=dict(default='default', type='str'), replicas=dict(default=None, type='int'), name=dict(default=None, type='str'), ), supports_check_mode=True, ) rval = OCScale.run_ansible(module.params, module.check_mode) if 'failed' in rval: module.fail_json(**rval) module.exit_json(**rval) if __name__ == '__main__': main() # -*- -*- -*- End included fragment: ansible/oc_scale.py -*- -*- -*-
[]
[]
[ "PATH" ]
[]
["PATH"]
python
1
0
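Note on the oc_scale.py row above: its only environment lookup is PATH, used by locate_oc_binary to find the `oc` client with two extra fallback directories. A minimal illustrative sketch of that lookup pattern, reduced to a standalone Python 3 snippet (the extra paths are copied from the module; the original also keeps a Python 2 fallback loop that is omitted here):

import os
import shutil

# Search PATH plus the module's two extra directories for the `oc` binary;
# fall back to the bare name if nothing is found.
extra_paths = ['/usr/local/bin', os.path.expanduser('~/bin')]
paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + extra_paths
oc_binary = shutil.which("oc", path=os.pathsep.join(paths)) or "oc"
print(oc_binary)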
main.go
package main import ( "bytes" "context" "encoding/json" "fmt" "io/ioutil" "os" "regexp" "strings" "text/template" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/serializer/yaml" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/discovery" "k8s.io/client-go/discovery/cached/memory" "k8s.io/client-go/dynamic" "k8s.io/client-go/rest" "k8s.io/client-go/restmapper" "k8s.io/client-go/tools/clientcmd" ) func main() { // Lookup for env variable `PLUGIN_KUBECONFIG`. kubeconfig, exists := os.LookupEnv("PLUGIN_KUBECONFIG") switch exists { // If it does exists means user intents for out-of-cluster usage with provided kubeconfig case true: data := []byte(kubeconfig) // create a kubeconfig file err := ioutil.WriteFile("./kubeconfig", data, 0644) if err != nil { fmt.Println(err) os.Exit(1) } outOfCluster, err := clientcmd.BuildConfigFromFlags("", "./kubeconfig") if err != nil { fmt.Println(err) os.Exit(1) } fmt.Println("Out-of-cluster SSA initiliazing") err = ssa(context.Background(), outOfCluster) if err != nil { fmt.Println(err) os.Exit(1) } // If user didn't provide a kubeconfig dron8s defaults to create an in-cluster config case false: inCluster, err := rest.InClusterConfig() if err != nil { fmt.Println(err) os.Exit(1) } fmt.Println("In-cluster SSA initiliazing") err = ssa(context.Background(), inCluster) if err != nil { fmt.Println(err) os.Exit(1) } } } // https://ymmt2005.hatenablog.com/entry/2020/04/14/An_example_of_using_dynamic_client_of_k8s.io/client-go#Go-client-libraries func ssa(ctx context.Context, cfg *rest.Config) error { var decUnstructured = yaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme) // 1. Prepare a RESTMapper to find GVR dc, err := discovery.NewDiscoveryClientForConfig(cfg) if err != nil { return err } mapper := restmapper.NewDeferredDiscoveryRESTMapper(memory.NewMemCacheClient(dc)) // 2. Prepare the dynamic client dyn, err := dynamic.NewForConfig(cfg) if err != nil { return err } // 2.1. Read user's yaml yaml, err := ioutil.ReadFile(os.Getenv("PLUGIN_YAML")) if err != nil { return err } // convert it to string text := string(yaml) // Parse variables t := template.Must(template.New("dron8s").Option("missingkey=zero").Parse(text)) b := bytes.NewBuffer(make([]byte, 0, 512)) err = t.Execute(b, getVariablesFromDrone()) if err != nil { return err } text = b.String() // Parse each yaml from file configs := strings.Split(text, "---") // variable to hold and print how many yaml configs are present var sum int // Iterate over provided configs for i, v := range configs { // If a yaml starts with `---` // the first slice of `configs` will be empty // so we just skip (continue) to next iteration if len(v) == 0 { continue } // 3. Decode YAML manifest into unstructured.Unstructured obj := &unstructured.Unstructured{} _, gvk, err := decUnstructured.Decode([]byte(v), nil, obj) if err != nil { return err } // 4. Find GVR mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version) if err != nil { return err } // 5. Obtain REST interface for the GVR var dr dynamic.ResourceInterface if mapping.Scope.Name() == meta.RESTScopeNameNamespace { if obj.GetNamespace() == "" { obj.SetNamespace("default") } // namespaced resources should specify the namespace dr = dyn.Resource(mapping.Resource).Namespace(obj.GetNamespace()) } else { // for cluster-wide resources dr = dyn.Resource(mapping.Resource) } // 6. 
Marshal object into JSON data, err := json.Marshal(obj) if err != nil { return err } fmt.Println("Applying config #", i) // 7. Create or Update the object with SSA // types.ApplyPatchType indicates SSA. // FieldManager specifies the field owner ID. _, err = dr.Patch(ctx, obj.GetName(), types.ApplyPatchType, data, metav1.PatchOptions{ FieldManager: "dron8s-plugin", }) if err != nil { return err } sum = i } fmt.Println("Dron8s finished applying ", sum+1, " configs.") return nil } // getVariablesFromDrone Get variables from drone func getVariablesFromDrone() map[string]string { ctx := make(map[string]string) pluginEnv := os.Environ() pluginReg := regexp.MustCompile(`^PLUGIN_(.*)=(.*)`) droneReg := regexp.MustCompile(`^DRONE_(.*)=(.*)`) for _, value := range pluginEnv { if pluginReg.MatchString(value) { matches := pluginReg.FindStringSubmatch(value) key := strings.ToLower(matches[1]) ctx[key] = matches[2] } if droneReg.MatchString(value) { matches := droneReg.FindStringSubmatch(value) key := strings.ToLower(matches[1]) ctx[key] = matches[2] } } return ctx }
[ "\"PLUGIN_YAML\"" ]
[]
[ "PLUGIN_YAML" ]
[]
["PLUGIN_YAML"]
go
1
0
web_app/routes/stats_routes.py
from flask import Blueprint, request, jsonify, render_template from sklearn.linear_model import LogisticRegression from web_app.models import User, Tweet import basilica import os API_KEY = os.getenv('BASILICA_API_KEY') connection = basilica.Connection(API_KEY) stats_routes = Blueprint('stats_routes', __name__) @stats_routes.route('/stats/prepare') def prepare(): print('Im here') users = User.query.all() return render_template('prepare_to_predict.html', users=users) @stats_routes.route('/stats/predict', methods=['POST']) def predict(): print('PREDICT ROUTE...') print('FORM DATA:', dict(request.form)) screen_name_a = request.form['screen_name_a'] screen_name_b = request.form['screen_name_b'] tweet_text = request.form['text'] user_a = User.query.filter(User.name == screen_name_a).one() user_b = User.query.filter(User.name == screen_name_b).one() user_a_tweets = user_a.tweets user_b_tweets = user_b.tweets embedding = [] labels = [] for tweet in user_a_tweets: labels.append(user_a.name) embedding.append(tweet.embeddings) for tweet in user_b_tweets: labels.append(user_b.name) embedding.append(tweet.embeddings) classifier = LogisticRegression() classifier.fit(embedding, labels) example_embedding = connection.embed_sentence(tweet_text) result = classifier.predict([example_embedding]) return render_template('results.html', screen_name_a=screen_name_a, screen_name_b=screen_name_b, tweet_text=tweet_text, screen_name_most_likely=result[0])
[]
[]
[ "BASILICA_API_KEY" ]
[]
["BASILICA_API_KEY"]
python
1
0
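For context on the predict route in the row above: it fits a LogisticRegression on the two users' stored tweet embeddings and then classifies the embedding of the submitted tweet text. A tiny illustrative stand-in with made-up 3-dimensional embeddings (only the fit/predict shape mirrors the route; the real embeddings come from Basilica):

from sklearn.linear_model import LogisticRegression

# Fake embeddings for two users; in the route these come from tweet.embeddings.
embeddings = [[0.9, 0.1, 0.0], [0.8, 0.2, 0.1], [0.1, 0.9, 0.3], [0.0, 0.8, 0.4]]
labels = ["user_a", "user_a", "user_b", "user_b"]

classifier = LogisticRegression()
classifier.fit(embeddings, labels)

# Classify a new embedding, as the route does for the submitted tweet text.
print(classifier.predict([[0.85, 0.15, 0.05]]))  # expected: ['user_a']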
cast/management/commands/get_api_token.py
import os import requests from getpass import getpass from django.core.management.base import BaseCommand class Command(BaseCommand): help = "Get api token for user providing username/password" def handle(self, *args, **options): username = os.environ.get("USERNAME", "analytics") obtain_token_url = os.environ.get("OBTAIN_TOKEN_URL") params = {"username": username, "password": getpass()} r = requests.post(obtain_token_url, data=params) token = r.json()["token"] print("token: ", token)
[]
[]
[ "USERNAME", "OBTAIN_TOKEN_URL" ]
[]
["USERNAME", "OBTAIN_TOKEN_URL"]
python
2
0
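The management command in the row above reads USERNAME (optional, defaulting to "analytics") and OBTAIN_TOKEN_URL from the environment before prompting for a password. A hedged sketch of one way to invoke it with those variables set; the URL value is a placeholder and the manage.py location is an assumption, not part of the row:

import os
import subprocess

env = dict(os.environ)
env["USERNAME"] = "analytics"  # optional; matches the command's default
env["OBTAIN_TOKEN_URL"] = "https://example.com/api/token-auth/"  # placeholder URL

# Runs the command defined in cast/management/commands/get_api_token.py;
# it prompts for a password via getpass and prints the returned token.
subprocess.run(["python", "manage.py", "get_api_token"], env=env, check=True)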
tests/trainer/data_flow/test_train_loop_flow_scalar_1_0.py
""" Tests to ensure that the training loop works with a dict (1.0) """ import pytest from pytorch_lightning import Trainer from tests.base.deterministic_model import DeterministicModel from tests.base.boring_model import BoringModel import os import torch def test__training_step__flow_scalar(tmpdir): """ Tests that only training_step can be used """ os.environ['PL_DEV_DEBUG'] = '1' class TestModel(DeterministicModel): def training_step(self, batch, batch_idx): acc = self.step(batch, batch_idx) acc = acc + batch_idx self.training_step_called = True return acc def backward(self, trainer, loss, optimizer, optimizer_idx): loss.backward() model = TestModel() model.val_dataloader = None trainer = Trainer( default_root_dir=tmpdir, limit_train_batches=2, limit_val_batches=2, max_epochs=2, row_log_interval=1, weights_summary=None, ) trainer.fit(model) # make sure correct steps were called assert model.training_step_called assert not model.training_step_end_called assert not model.training_epoch_end_called def test__training_step__tr_step_end__flow_scalar(tmpdir): """ Tests that only training_step can be used """ os.environ['PL_DEV_DEBUG'] = '1' class TestModel(DeterministicModel): def training_step(self, batch, batch_idx): acc = self.step(batch, batch_idx) acc = acc + batch_idx self.training_step_called = True self.out = acc return acc def training_step_end(self, tr_step_output): assert self.out == tr_step_output assert self.count_num_graphs({'loss': tr_step_output}) == 1 self.training_step_end_called = True return tr_step_output def backward(self, trainer, loss, optimizer, optimizer_idx): loss.backward() model = TestModel() model.val_dataloader = None trainer = Trainer( default_root_dir=tmpdir, limit_train_batches=2, limit_val_batches=2, max_epochs=2, row_log_interval=1, weights_summary=None, ) trainer.fit(model) # make sure correct steps were called assert model.training_step_called assert model.training_step_end_called assert not model.training_epoch_end_called def test__training_step__epoch_end__flow_scalar(tmpdir): """ Tests that only training_step can be used """ os.environ['PL_DEV_DEBUG'] = '1' class TestModel(DeterministicModel): def training_step(self, batch, batch_idx): acc = self.step(batch, batch_idx) acc = acc + batch_idx self.training_step_called = True return acc def training_epoch_end(self, outputs): self.training_epoch_end_called = True # verify we saw the current num of batches assert len(outputs) == 2 for b in outputs: # time = 1 assert len(b) == 1 assert 'loss' in b assert isinstance(b, dict) def backward(self, trainer, loss, optimizer, optimizer_idx): loss.backward() model = TestModel() model.val_dataloader = None trainer = Trainer( default_root_dir=tmpdir, limit_train_batches=2, limit_val_batches=2, max_epochs=2, row_log_interval=1, weights_summary=None, ) trainer.fit(model) # make sure correct steps were called assert model.training_step_called assert not model.training_step_end_called assert model.training_epoch_end_called def test__training_step__step_end__epoch_end__flow_scalar(tmpdir): """ Tests that only training_step can be used """ os.environ['PL_DEV_DEBUG'] = '1' class TestModel(DeterministicModel): def training_step(self, batch, batch_idx): acc = self.step(batch, batch_idx) acc = acc + batch_idx self.training_step_called = True return acc def training_step_end(self, tr_step_output): assert isinstance(tr_step_output, torch.Tensor) assert self.count_num_graphs({'loss': tr_step_output}) == 1 self.training_step_end_called = True return tr_step_output def 
training_epoch_end(self, outputs): self.training_epoch_end_called = True # verify we saw the current num of batches assert len(outputs) == 2 for b in outputs: # time = 1 assert len(b) == 1 assert 'loss' in b assert isinstance(b, dict) def backward(self, trainer, loss, optimizer, optimizer_idx): loss.backward() model = TestModel() model.val_dataloader = None trainer = Trainer( default_root_dir=tmpdir, limit_train_batches=2, limit_val_batches=2, max_epochs=2, row_log_interval=1, weights_summary=None, ) trainer.fit(model) # make sure correct steps were called assert model.training_step_called assert model.training_step_end_called assert model.training_epoch_end_called def test_train_step_no_return(tmpdir): """ Tests that only training_step can be used """ class TestModel(BoringModel): def training_step(self, batch, batch_idx): self.training_step_called = True loss = self.step(batch[0]) self.log('a', loss, on_step=True, on_epoch=True) def training_epoch_end(self, outputs) -> None: assert len(outputs) == 0 model = TestModel() model.val_dataloader = None trainer = Trainer( default_root_dir=tmpdir, limit_train_batches=2, limit_val_batches=2, max_epochs=1, row_log_interval=1, weights_summary=None, ) with pytest.warns(UserWarning, match=r'.*training_step returned None.*'): trainer.fit(model)
[]
[]
[ "PL_DEV_DEBUG" ]
[]
["PL_DEV_DEBUG"]
python
1
0
test/functional/test_runner.py
#!/usr/bin/env python3 # Copyright (c) 2014-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Run regression test suite. This module calls down into individual test cases via subprocess. It will forward all unrecognized arguments onto the individual test scripts. Functional tests are disabled on Windows by default. Use --force to run them anyway. For a description of arguments recognized by test scripts, see `test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`. """ import argparse from collections import deque import configparser import datetime import os import time import shutil import signal import sys import subprocess import tempfile import re import logging # Formatting. Default colors to empty strings. BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "") try: # Make sure python thinks it can write unicode to its stdout "\u2713".encode("utf_8").decode(sys.stdout.encoding) TICK = "✓ " CROSS = "✖ " CIRCLE = "○ " except UnicodeDecodeError: TICK = "P " CROSS = "x " CIRCLE = "o " if os.name == 'posix': # primitive formatting on supported # terminal via ANSI escape sequences: BOLD = ('\033[0m', '\033[1m') BLUE = ('\033[0m', '\033[0;34m') RED = ('\033[0m', '\033[0;31m') GREY = ('\033[0m', '\033[1;30m') TEST_EXIT_PASSED = 0 TEST_EXIT_SKIPPED = 77 BASE_SCRIPTS= [ # Scripts that are run by the travis build process. # Longest test should go first, to favor running tests in parallel 'wallet_basic.py', 'wallet_backup.py', # vv Tests less than 5m vv 'wallet_abandonconflict.py', 'wallet_reorg-stake.py', 'feature_coldStaking.py', 'rpc_rawtransaction.py', 'wallet_zapwallettxes.py', 'wallet_keypool_topup.py', 'p2p_pos_doublespend.py', 'wallet_txn_doublespend.py --mineblock', 'wallet_txn_clone.py --mineblock', 'interface_rest.py', 'feature_proxy.py', 'p2p_pos_fakestake.py', 'p2p_pos_fakestake_accepted.py', 'zerocoin_valid_public_spend.py', #'p2p_zpos_fakestake.py', #'p2p_zpos_fakestake_accepted.py', #'zerocoin_wrapped_serials.py', #'feature_block.py', #'rpc_fundrawtransaction.py', # vv Tests less than 2m vv 'feature_uacomment.py', 'wallet_listreceivedby.py', 'wallet_accounts.py', 'wallet_dump.py', 'rpc_listtransactions.py', # vv Tests less than 60s vv #'wallet_importmulti.py', #'mempool_limit.py', # We currently don't limit our mempool_reorg 'feature_reindex.py', 'rpc_bip38.py', # vv Tests less than 30s vv 'rpc_spork.py', 'rpc_budget.py', #'interface_zmq.py', 'interface_bitcoin_cli.py', 'mempool_resurrect.py', #'rpc_getchaintips.py', 'mempool_spend_coinbase.py', 'mempool_reorg.py', #'mempool_persist.py', # Not yet implemented 'interface_http.py', #'rpc_users.py', 'rpc_signrawtransaction.py', 'p2p_disconnect_ban.py', 'rpc_decodescript.py', 'rpc_blockchain.py', #'rpc_deprecated.py', 'wallet_disable.py', 'rpc_net.py', 'p2p_time_offset.py', 'wallet_keypool.py', #'p2p_mempool.py', #'mining_prioritisetransaction.py', #'p2p_invalid_block.py', #'p2p_invalid_tx.py', 'rpc_signmessage.py', #'wallet_import_rescan.py', #'mining_basic.py', #'wallet_bumpfee.py', #'wallet_listsinceblock.py', #'p2p_leak.py', 'wallet_encryption.py', #'feature_cltv.py', #'wallet_resendwallettransactions.py', #'feature_minchainwork.py', #'p2p_fingerprint.py', #'p2p_unrequested_blocks.py', #'feature_config_args.py', 'feature_help.py', # Don't append tests at the end to avoid merge conflicts # Put them in a random line within the section that fits their approximate run-time ] 
EXTENDED_SCRIPTS = [ # These tests are not run by the travis build process. # Longest test should go first, to favor running tests in parallel # vv Tests less than 20m vv #'feature_fee_estimation.py', # vv Tests less than 5m vv # vv Tests less than 2m vv #'p2p_timeouts.py', # vv Tests less than 60s vv #'p2p_feefilter.py', 'rpc_bind.py', # vv Tests less than 30s vv #'example_test.py', 'feature_notifications.py', 'rpc_invalidateblock.py', ] # Place EXTENDED_SCRIPTS first since it has the 3 longest running tests ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS NON_SCRIPTS = [ # These are python files that live in the functional tests directory, but are not test scripts. "combine_logs.py", "create_cache.py", "test_runner.py", ] def main(): # Parse arguments and pass through unrecognised args parser = argparse.ArgumentParser(add_help=False, usage='%(prog)s [test_runner.py options] [script options] [scripts]', description=__doc__, epilog=''' Help text and arguments for individual test script:''', formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('--combinedlogslen', '-c', type=int, default=0, help='print a combined log (of length n lines) from all test nodes and test framework to the console on failure.') parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface') parser.add_argument('--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.') parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests') parser.add_argument('--force', '-f', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).') parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit') parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.') parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.') parser.add_argument('--quiet', '-q', action='store_true', help='only print dots, results summary and failure logs') parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs") args, unknown_args = parser.parse_known_args() # args to be passed on always start with two dashes; tests are the remaining unknown args tests = [arg for arg in unknown_args if arg[:2] != "--"] passon_args = [arg for arg in unknown_args if arg[:2] == "--"] # Read config generated by configure. 
config = configparser.ConfigParser() configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini" config.read_file(open(configfile)) passon_args.append("--configfile=%s" % configfile) # Set up logging logging_level = logging.INFO if args.quiet else logging.DEBUG logging.basicConfig(format='%(message)s', level=logging_level) # Create base test directory tmpdir = "%s/pivx_test_runner_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S")) os.makedirs(tmpdir) logging.debug("Temporary test directory at %s" % tmpdir) enable_wallet = config["components"].getboolean("ENABLE_WALLET") enable_utils = config["components"].getboolean("ENABLE_UTILS") enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND") if config["environment"]["EXEEXT"] == ".exe" and not args.force: # https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9 # https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964 print("Tests currently disabled on Windows by default. Use --force option to enable") sys.exit(0) if not (enable_wallet and enable_utils and enable_bitcoind): print("No functional tests to run. Wallet, utils, and pivxd must all be enabled") print("Rerun `configure` with -enable-wallet, -with-utils and -with-daemon and rerun make") sys.exit(0) # Build list of tests if tests: # Individual tests have been specified. Run specified tests that exist # in the ALL_SCRIPTS list. Accept the name with or without .py extension. tests = [re.sub("\.py$", "", t) + ".py" for t in tests] test_list = [] for t in tests: if t in ALL_SCRIPTS: test_list.append(t) else: print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], t)) else: # No individual tests have been specified. # Run all base tests, and optionally run extended tests. test_list = BASE_SCRIPTS if args.extended: # place the EXTENDED_SCRIPTS first since the three longest ones # are there and the list is shorter test_list = EXTENDED_SCRIPTS + test_list # Remove the test cases that the user has explicitly asked to exclude. if args.exclude: tests_excl = [re.sub("\.py$", "", t) + ".py" for t in args.exclude.split(',')] for exclude_test in tests_excl: if exclude_test in test_list: test_list.remove(exclude_test) else: print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test)) if not test_list: print("No valid test scripts specified. Check that your test is in one " "of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests") sys.exit(0) if args.help: # Print help for test_runner.py, then print help of the first script (with args removed) and exit. parser.print_help() subprocess.check_call([(config["environment"]["SRCDIR"] + '/test/functional/' + test_list[0].split()[0])] + ['-h']) sys.exit(0) check_script_list(config["environment"]["SRCDIR"]) check_script_prefixes() if not args.keepcache: shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True) run_tests(test_list, config["environment"]["SRCDIR"], config["environment"]["BUILDDIR"], config["environment"]["EXEEXT"], tmpdir, args.jobs, args.coverage, passon_args, args.combinedlogslen) def run_tests(test_list, src_dir, build_dir, exeext, tmpdir, jobs=1, enable_coverage=False, args=[], combined_logs_len=0): # Warn if bitcoind is already running (unix only) try: if subprocess.check_output(["pidof", "pivxd"]) is not None: print("%sWARNING!%s There is already a pivxd process running on this system. 
Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0])) except (OSError, subprocess.SubprocessError): pass # Warn if there is a cache directory cache_dir = "%s/test/cache" % build_dir if os.path.isdir(cache_dir): print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir)) #Set env vars if "BITCOIND" not in os.environ: os.environ["BITCOIND"] = build_dir + '/src/pivxd' + exeext os.environ["BITCOINCLI"] = build_dir + '/src/pivx-cli' + exeext tests_dir = src_dir + '/test/functional/' flags = ["--srcdir={}/src".format(build_dir)] + args flags.append("--cachedir=%s" % cache_dir) if enable_coverage: coverage = RPCCoverage() flags.append(coverage.flag) logging.debug("Initializing coverage directory at %s" % coverage.dir) else: coverage = None if len(test_list) > 1 and jobs > 1: # Populate cache try: subprocess.check_output([tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir]) except subprocess.CalledProcessError as e: sys.stdout.buffer.write(e.output) raise #Run Tests job_queue = TestHandler(jobs, tests_dir, tmpdir, test_list, flags) time0 = time.time() test_results = [] max_len_name = len(max(test_list, key=len)) test_count = len(test_list) for i in range(test_count): test_result, testdir, stdout, stderr = job_queue.get_next() test_results.append(test_result) done_str = "{}/{} - {}{}{}".format(i + 1, test_count, BOLD[1], test_result.name, BOLD[0]) if test_result.status == "Passed": if stderr == "": logging.debug("%s passed, Duration: %s s" % (done_str, test_result.time)) else: logging.debug("%s passed (with warnings), Duration: %s s" % (done_str, test_result.time)) print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n') elif test_result.status == "Skipped": logging.debug("%s skipped" % (done_str)) else: print("%s failed, Duration: %s s\n" % (done_str, test_result.time)) print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n') print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n') if combined_logs_len and os.path.isdir(testdir): # Print the final `combinedlogslen` lines of the combined logs print('{}Combine the logs and print the last {} lines ...{}'.format(BOLD[1], combined_logs_len, BOLD[0])) print('\n============') print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0])) print('============\n') combined_logs, _ = subprocess.Popen([os.path.join(tests_dir, 'combine_logs.py'), '-c', testdir], universal_newlines=True, stdout=subprocess.PIPE).communicate() print("\n".join(deque(combined_logs.splitlines(), combined_logs_len))) print_results(test_results, max_len_name, (int(time.time() - time0))) if coverage: coverage.report_rpc_coverage() logging.debug("Cleaning up coverage data") coverage.cleanup() # Clear up the temp directory if all subdirectories are gone if not os.listdir(tmpdir): os.rmdir(tmpdir) all_passed = all(map(lambda test_result: test_result.was_successful, test_results)) sys.exit(not all_passed) def print_results(test_results, max_len_name, runtime): results = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0] test_results.sort(key=lambda result: result.name.lower()) all_passed = True time_sum = 0 for test_result in test_results: all_passed = all_passed and test_result.was_successful time_sum += test_result.time test_result.padding = max_len_name results += str(test_result) status = TICK + "Passed" if all_passed else CROSS + "Failed" results += BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % 
("ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0] results += "Runtime: %s s\n" % (runtime) print(results) class TestHandler: """ Trigger the test scripts passed in via the list. """ def __init__(self, num_tests_parallel, tests_dir, tmpdir, test_list=None, flags=None): assert(num_tests_parallel >= 1) self.num_jobs = num_tests_parallel self.tests_dir = tests_dir self.tmpdir = tmpdir self.test_list = test_list self.flags = flags self.num_running = 0 # In case there is a graveyard of zombie pivxds, we can apply a # pseudorandom offset to hopefully jump over them. # (625 is PORT_RANGE/MAX_NODES) self.portseed_offset = int(time.time() * 1000) % 625 self.jobs = [] def get_next(self): while self.num_running < self.num_jobs and self.test_list: # Add tests self.num_running += 1 t = self.test_list.pop(0) portseed = len(self.test_list) + self.portseed_offset portseed_arg = ["--portseed={}".format(portseed)] log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16) log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16) test_argv = t.split() testdir = "{}/{}_{}".format(self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed) tmpdir_arg = ["--tmpdir={}".format(testdir)] self.jobs.append((t, time.time(), subprocess.Popen([self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg, universal_newlines=True, stdout=log_stdout, stderr=log_stderr), testdir, log_stdout, log_stderr)) if not self.jobs: raise IndexError('pop from empty list') # Print remaining running jobs when all jobs have been started. if not self.test_list: print("Remaining jobs: [{}]".format(", ".join(j[0] for j in self.jobs))) dot_count = 0 while True: # Return first proc that finishes time.sleep(.5) for j in self.jobs: (name, time0, proc, testdir, log_out, log_err) = j if os.getenv('TRAVIS') == 'true' and int(time.time() - time0) > 20 * 60: # In travis, timeout individual tests after 20 minutes (to stop tests hanging and not # providing useful output. proc.send_signal(signal.SIGINT) if proc.poll() is not None: log_out.seek(0), log_err.seek(0) [stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)] log_out.close(), log_err.close() if proc.returncode == TEST_EXIT_PASSED: status = "Passed" elif proc.returncode == TEST_EXIT_SKIPPED: status = "Skipped" else: status = "Failed" self.num_running -= 1 self.jobs.remove(j) clearline = '\r' + (' ' * dot_count) + '\r' print(clearline, end='', flush=True) dot_count = 0 return TestResult(name, status, int(time.time() - time0)), testdir, stdout, stderr print('.', end='', flush=True) dot_count += 1 class TestResult(): def __init__(self, name, status, time): self.name = name self.status = status self.time = time self.padding = 0 def __repr__(self): if self.status == "Passed": color = BLUE glyph = TICK elif self.status == "Failed": color = RED glyph = CROSS elif self.status == "Skipped": color = GREY glyph = CIRCLE return color[1] + "%s | %s%s | %s s\n" % (self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0] @property def was_successful(self): return self.status != "Failed" def check_script_prefixes(): """Check that at most a handful of the test scripts don't start with one of the allowed name prefixes.""" # LEEWAY is provided as a transition measure, so that pull-requests # that introduce new tests that don't conform with the naming # convention don't immediately cause the tests to fail. 
LEEWAY = 10 good_prefixes_re = re.compile("(example|feature|interface|mempool|mining|p2p|rpc|wallet|zerocoin)_") bad_script_names = [script for script in ALL_SCRIPTS if good_prefixes_re.match(script) is None] if len(bad_script_names) > 0: print("INFO: %d tests not meeting naming conventions:" % (len(bad_script_names))) print(" %s" % ("\n ".join(sorted(bad_script_names)))) assert len(bad_script_names) <= LEEWAY, "Too many tests not following naming convention! (%d found, maximum: %d)" % (len(bad_script_names), LEEWAY) def check_script_list(src_dir): """Check scripts directory. Check that there are no scripts in the functional tests directory which are not being run by pull-tester.py.""" script_dir = src_dir + '/test/functional/' python_files = set([t for t in os.listdir(script_dir) if t[-3:] == ".py"]) missed_tests = list(python_files - set(map(lambda x: x.split()[0], ALL_SCRIPTS + NON_SCRIPTS))) if len(missed_tests) != 0: print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests))) if os.getenv('TRAVIS') == 'true': # On travis this warning is an error to prevent merging incomplete commits into master sys.exit(1) class RPCCoverage(): """ Coverage reporting utilities for test_runner. Coverage calculation works by having each test script subprocess write coverage files into a particular directory. These files contain the RPC commands invoked during testing, as well as a complete listing of RPC commands per `pivx-cli help` (`rpc_interface.txt`). After all tests complete, the commands run are combined and diff'd against the complete list to calculate uncovered RPC commands. See also: test/functional/test_framework/coverage.py """ def __init__(self): self.dir = tempfile.mkdtemp(prefix="coverage") self.flag = '--coveragedir=%s' % self.dir def report_rpc_coverage(self): """ Print out RPC commands that were unexercised by tests. """ uncovered = self._get_uncovered_rpc_commands() if uncovered: print("Uncovered RPC commands:") print("".join((" - %s\n" % i) for i in sorted(uncovered))) else: print("All RPC commands covered.") def cleanup(self): return shutil.rmtree(self.dir) def _get_uncovered_rpc_commands(self): """ Return a set of currently untested RPC commands. """ # This is shared from `test/functional/test-framework/coverage.py` reference_filename = 'rpc_interface.txt' coverage_file_prefix = 'coverage.' coverage_ref_filename = os.path.join(self.dir, reference_filename) coverage_filenames = set() all_cmds = set() covered_cmds = set() if not os.path.isfile(coverage_ref_filename): raise RuntimeError("No coverage reference found") with open(coverage_ref_filename, 'r') as f: all_cmds.update([i.strip() for i in f.readlines()]) for root, dirs, files in os.walk(self.dir): for filename in files: if filename.startswith(coverage_file_prefix): coverage_filenames.add(os.path.join(root, filename)) for filename in coverage_filenames: with open(filename, 'r') as f: covered_cmds.update([i.strip() for i in f.readlines()]) return all_cmds - covered_cmds if __name__ == '__main__': main()
[]
[]
[ "BITCOINCLI", "TRAVIS", "BITCOIND" ]
[]
["BITCOINCLI", "TRAVIS", "BITCOIND"]
python
3
0
vendor/github.com/docker/libnetwork/internal/setmatrix/setmatrix.go
package setmatrix import ( "sync" mapset "github.com/deckarep/golang-set" ) // SetMatrix is a map of Sets type SetMatrix interface { // Get returns the members of the set for a specific key as a slice. Get(key string) ([]interface{}, bool) // Contains is used to verify if an element is in a set for a specific key // returns true if the element is in the set // returns true if there is a set for the key Contains(key string, value interface{}) (bool, bool) // Insert inserts the value in the set of a key // returns true if the value is inserted (was not already in the set), false otherwise // returns also the length of the set for the key Insert(key string, value interface{}) (bool, int) // Remove removes the value in the set for a specific key // returns true if the value is deleted, false otherwise // returns also the length of the set for the key Remove(key string, value interface{}) (bool, int) // Cardinality returns the number of elements in the set for a key // returns false if the set is not present Cardinality(key string) (int, bool) // String returns the string version of the set, empty otherwise // returns false if the set is not present String(key string) (string, bool) // Returns all the keys in the map Keys() []string } type setMatrix struct { matrix map[string]mapset.Set sync.Mutex } // NewSetMatrix creates a new set matrix object func NewSetMatrix() SetMatrix { s := &setMatrix{} s.init() return s } func (s *setMatrix) init() { s.matrix = make(map[string]mapset.Set) } func (s *setMatrix) Get(key string) ([]interface{}, bool) { s.Lock() defer s.Unlock() set, ok := s.matrix[key] if !ok { return nil, ok } return set.ToSlice(), ok } func (s *setMatrix) Contains(key string, value interface{}) (bool, bool) { s.Lock() defer s.Unlock() set, ok := s.matrix[key] if !ok { return false, ok } return set.Contains(value), ok } func (s *setMatrix) Insert(key string, value interface{}) (bool, int) { s.Lock() defer s.Unlock() set, ok := s.matrix[key] if !ok { s.matrix[key] = mapset.NewSet() s.matrix[key].Add(value) return true, 1 } return set.Add(value), set.Cardinality() } func (s *setMatrix) Remove(key string, value interface{}) (bool, int) { s.Lock() defer s.Unlock() set, ok := s.matrix[key] if !ok { return false, 0 } var removed bool if set.Contains(value) { set.Remove(value) removed = true // If the set is empty remove it from the matrix if set.Cardinality() == 0 { delete(s.matrix, key) } } return removed, set.Cardinality() } func (s *setMatrix) Cardinality(key string) (int, bool) { s.Lock() defer s.Unlock() set, ok := s.matrix[key] if !ok { return 0, ok } return set.Cardinality(), ok } func (s *setMatrix) String(key string) (string, bool) { s.Lock() defer s.Unlock() set, ok := s.matrix[key] if !ok { return "", ok } return set.String(), ok } func (s *setMatrix) Keys() []string { s.Lock() defer s.Unlock() keys := make([]string, 0, len(s.matrix)) for k := range s.matrix { keys = append(keys, k) } return keys }
[]
[]
[]
[]
[]
go
null
null
null
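The SetMatrix interface above documents a map-of-sets with membership, cardinality, and key listing. A minimal usage sketch, assuming the package could be imported at the vendored path shown in the filename (it is an internal package, so outside libnetwork this is illustrative only); the key and values are made up:

package main

import (
	"fmt"

	"github.com/docker/libnetwork/internal/setmatrix"
)

func main() {
	// Map-of-sets: one hypothetical key ("svc1") holding a set of values.
	sm := setmatrix.NewSetMatrix()
	sm.Insert("svc1", "10.0.0.1") // (true, 1): inserted, set now has 1 element
	sm.Insert("svc1", "10.0.0.2")
	sm.Insert("svc1", "10.0.0.1") // duplicate: (false, 2)

	// Contains reports both membership and whether a set exists for the key.
	in, hasKey := sm.Contains("svc1", "10.0.0.2")
	fmt.Println(in, hasKey) // true true

	// Removing the last element deletes the key from the matrix entirely.
	sm.Remove("svc1", "10.0.0.1")
	sm.Remove("svc1", "10.0.0.2")
	_, ok := sm.Cardinality("svc1")
	fmt.Println(ok) // false
}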
main.go
package main import ( "bytes" "encoding/json" "fmt" "log" "net/http" "os" "github.com/aws/aws-lambda-go/lambda" ) type Request struct { Records []struct { SNS struct { Type string `json:"Type"` Timestamp string `json:"Timestamp"` SNSMessage string `json:"Message"` } `json:"Sns"` } `json:"Records"` } type SNSMessage struct { AlarmName string `json:"AlarmName"` NewStateValue string `json:"NewStateValue"` NewStateReason string `json:"NewStateReason"` } type SlackMessage struct { Text string `json:"text"` Attachments []Attachment `json:"attachments"` } type Attachment struct { Text string `json:"text"` Color string `json:"color"` Title string `json:"title"` } func handler(request Request) error { var snsMessage SNSMessage err := json.Unmarshal([]byte(request.Records[0].SNS.SNSMessage), &snsMessage) if err != nil { return err } log.Printf("New alarm: %s - Reason: %s", snsMessage.AlarmName, snsMessage.NewStateReason) slackMessage := buildSlackMessage(snsMessage) if err := postToSlack(slackMessage); err != nil { return err } log.Println("Notification has been sent") return nil } func buildSlackMessage(message SNSMessage) SlackMessage { return SlackMessage{ Text: fmt.Sprintf("`%s`", message.AlarmName), Attachments: []Attachment{ Attachment{ Text: message.NewStateReason, Color: "danger", Title: "Reason", }, }, } } func postToSlack(message SlackMessage) error { client := &http.Client{} data, err := json.Marshal(message) if err != nil { return err } req, err := http.NewRequest("POST", os.Getenv("SLACK_WEBBOOK"), bytes.NewBuffer(data)) if err != nil { return err } resp, err := client.Do(req) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode != 200 { return fmt.Errorf("posting to Slack failed with status code %d", resp.StatusCode) } return nil } func main() { lambda.Start(handler) }
[ "\"SLACK_WEBBOOK\"" ]
[]
[ "SLACK_WEBBOOK" ]
[]
["SLACK_WEBBOOK"]
go
1
0
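The Lambda above reads the webhook URL from the SLACK_WEBBOOK environment variable (the name as it appears in this record) and posts a SlackMessage JSON payload. A minimal standalone sketch of the same POST using only the standard library; the alarm text and reason are invented for illustration:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"os"
)

func main() {
	// Payload mirroring the SlackMessage/Attachment shape used in the handler above.
	payload := map[string]interface{}{
		"text": "`TestAlarm`",
		"attachments": []map[string]string{
			{"title": "Reason", "color": "danger", "text": "Threshold Crossed"},
		},
	}
	data, err := json.Marshal(payload)
	if err != nil {
		panic(err)
	}

	// Same environment variable name as the handler above.
	resp, err := http.Post(os.Getenv("SLACK_WEBBOOK"), "application/json", bytes.NewBuffer(data))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.StatusCode)
}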
config/configuration.go
package config import ( "fmt" "log" "os" "path/filepath" "strings" "time" "github.com/gin-contrib/cors" "github.com/spf13/viper" ) const ( // local helm path helmPath = "helm.path" // DNSBaseDomain configuration key for the base domain setting DNSBaseDomain = "dns.domain" // DNSSecretNamespace configuration key for the K8s namespace setting // external DNS services secrets are mounted to. DNSSecretNamespace = "dns.secretNamespace" // DNSGcIntervalMinute configuration key for the interval setting at which the DNS garbage collector runs DNSGcIntervalMinute = "dns.gcIntervalMinute" // DNSGcLogLevel configuration key for the DNS garbage collector logging level default value: "debug" DNSGcLogLevel = "dns.gcLogLevel" // Route53MaintenanceWndMinute configuration key for the maintenance window for Route53. // This is the maintenance window before the next AWS Route53 pricing period starts Route53MaintenanceWndMinute = "route53.maintenanceWindowMinute" //PipelineMonitorNamespace pipeline infra namespace key PipelineMonitorNamespace = "infra.namespace" // EksTemplateLocation is the configuration key the location to get EKS Cloud Formation templates from // the location to get EKS Cloud Formation templates from EksTemplateLocation = "eks.templateLocation" // AwsCredentialPath is the path in Vault to get AWS credentials from for Pipeline AwsCredentialPath = "aws.credentials.path" // Config keys to GKE resource delete GKEResourceDeleteWaitAttempt = "gke.resourceDeleteWaitAttempt" GKEResourceDeleteSleepSeconds = "gke.resourceDeleteSleepSeconds" ) //Init initializes the configurations func init() { viper.AddConfigPath("$HOME/config") viper.AddConfigPath("./") viper.AddConfigPath("./config") viper.AddConfigPath("$PIPELINE_CONFIG_DIR/") viper.SetConfigName("config") //viper.SetConfigType("toml") // Set defaults TODO expand defaults viper.SetDefault("drone.url", "http://localhost:8000") viper.SetDefault("helm.retryAttempt", 30) viper.SetDefault("helm.retrySleepSeconds", 15) viper.SetDefault("helm.tillerVersion", "v2.9.0") viper.SetDefault("helm.stableRepositoryURL", "https://kubernetes-charts.storage.googleapis.com") viper.SetDefault("helm.banzaiRepositoryURL", "http://kubernetes-charts.banzaicloud.com") viper.SetDefault(helmPath, "./orgs") viper.SetDefault("cloud.defaultProfileName", "default") viper.SetDefault("cloud.configRetryCount", 30) viper.SetDefault("cloud.configRetrySleep", 15) viper.SetDefault(AwsCredentialPath, "secret/data/banzaicloud/aws") viper.SetDefault("logging.kubicornloglevel", "debug") viper.SetDefault("catalog.repositoryUrl", "http://kubernetes-charts.banzaicloud.com/branch/spotguide") pwd, err := os.Getwd() if err != nil { log.Fatalf("Error reading config file, %s", err.Error()) } viper.SetDefault("statestore.path", fmt.Sprintf("%s/statestore/", pwd)) viper.SetDefault("auth.jwtissuer", "https://banzaicloud.com/") viper.SetDefault("auth.jwtaudience", "https://pipeline.banzaicloud.com") viper.SetDefault("pipeline.listenport", 9090) viper.SetDefault("pipeline.certfile", "") viper.SetDefault("pipeline.keyfile", "") viper.SetDefault("pipeline.uipath", "/ui") viper.SetDefault("pipeline.basepath", "") viper.SetDefault("metrics.enabled", false) viper.SetDefault("metrics.port", ":9900") viper.SetDefault("database.dialect", "mysql") viper.SetDefault("database.port", 3306) viper.SetDefault("database.host", "localhost") viper.SetDefault("database.user", "kellyslater") viper.SetDefault("database.password", "pipemaster123!") viper.SetDefault("database.dbname", "pipelinedb") 
viper.SetDefault("database.logging", false) viper.SetDefault("audit.enabled", true) viper.SetDefault("audit.headers", []string{"secretId"}) viper.SetDefault("audit.skippaths", []string{"/auth/github/callback", "/pipeline/api"}) viper.SetDefault("tls.validity", "8760h") // 1 year viper.SetDefault(DNSBaseDomain, "banzaicloud.io") viper.SetDefault(DNSSecretNamespace, "pipeline-infra") viper.SetDefault(DNSGcIntervalMinute, 1) viper.SetDefault(DNSGcLogLevel, "debug") viper.SetDefault(Route53MaintenanceWndMinute, 15) viper.SetDefault(GKEResourceDeleteWaitAttempt, 12) viper.SetDefault(GKEResourceDeleteSleepSeconds, 5) ReleaseName := os.Getenv("KUBERNETES_RELEASE_NAME") if ReleaseName == "" { ReleaseName = "pipeline" } viper.SetDefault("monitor.release", ReleaseName) viper.SetDefault("monitor.enabled", false) viper.SetDefault("monitor.configmap", "") viper.SetDefault("monitor.mountpath", "") viper.SetDefault("monitor.grafanaAdminUsername", "admin") viper.SetDefault(PipelineMonitorNamespace, "pipeline-infra") viper.SetDefault(EksTemplateLocation, filepath.Join(pwd, "templates", "eks")) // Find and read the config file if err := viper.ReadInConfig(); err != nil { log.Fatalf("Error reading config file, %s", err) } // Confirm which config file is used fmt.Printf("Using config: %s\n", viper.ConfigFileUsed()) viper.SetEnvPrefix("pipeline") viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) viper.AutomaticEnv() } //GetCORS gets CORS related config func GetCORS() cors.Config { viper.SetDefault("cors.AllowAllOrigins", true) viper.SetDefault("cors.AllowOrigins", []string{"http://", "https://"}) viper.SetDefault("cors.AllowMethods", []string{"PUT", "DELETE", "GET", "POST", "OPTIONS"}) viper.SetDefault("cors.AllowHeaders", []string{"Origin", "Authorization", "Content-Type", "secretId"}) viper.SetDefault("cors.ExposeHeaders", []string{"Content-Length"}) viper.SetDefault("cors.AllowCredentials", true) viper.SetDefault("cors.MaxAge", 12) config := cors.DefaultConfig() cors.DefaultConfig() config.AllowAllOrigins = viper.GetBool("cors.AllowAllOrigins") if !config.AllowAllOrigins { config.AllowOrigins = viper.GetStringSlice("cors.AllowOrigins") } config.AllowMethods = viper.GetStringSlice("cors.AllowMethods") config.AllowHeaders = viper.GetStringSlice("cors.AllowHeaders") config.ExposeHeaders = viper.GetStringSlice("cors.ExposeHeaders") config.AllowCredentials = viper.GetBool("cors.AllowCredentials") maxAge := viper.GetInt("cors.MaxAge") config.MaxAge = time.Duration(maxAge) * time.Hour return config } // GetStateStorePath returns the state store path func GetStateStorePath(clusterName string) string { stateStorePath := viper.GetString("statestore.path") if len(clusterName) == 0 { return stateStorePath } return fmt.Sprintf("%s/%s", stateStorePath, clusterName) } // GetHelmPath returns local helm path func GetHelmPath(organizationName string) string { return fmt.Sprintf("%s/%s", viper.GetString(helmPath), organizationName) }
[ "\"KUBERNETES_RELEASE_NAME\"" ]
[]
[ "KUBERNETES_RELEASE_NAME" ]
[]
["KUBERNETES_RELEASE_NAME"]
go
1
0
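Because the config package above calls SetEnvPrefix("pipeline") together with an EnvKeyReplacer that maps "." to "_" and AutomaticEnv, any configuration key can be overridden through a PIPELINE_-prefixed environment variable. A minimal sketch of that mechanism, with an illustrative key and value:

package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/spf13/viper"
)

func main() {
	// Default, as set in the init() above.
	viper.SetDefault("database.host", "localhost")

	// Same wiring as the config package: "database.host" maps to PIPELINE_DATABASE_HOST.
	viper.SetEnvPrefix("pipeline")
	viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
	viper.AutomaticEnv()

	os.Setenv("PIPELINE_DATABASE_HOST", "db.internal") // illustrative value
	fmt.Println(viper.GetString("database.host"))      // db.internal
}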
extra/android/mak/build.py
from waflib.TaskGen import feature, before_method, after_method from waflib import Options, Context, Node, Utils, Errors, Build, TaskGen import os @feature('bugengine:android:aapt_resource') def aapt_resource(self): if 'android' in self.env.VALID_PLATFORMS: self.manifest = self.make_bld_node('src', '', 'AndroidManifest.xml') self.manifest_task = self.create_task('android_mft', [], self.manifest) self.resource_task = tsk = self.create_task('aapt_create', self.resource.ant_glob('**/*'), [self.destfile]) tsk.env.MANIFEST = self.manifest.abspath() tsk.env.RESOURCE_PATH = self.resource.abspath() tsk.dep_nodes = [self.manifest] nodes = [self.resource] while nodes: node = nodes.pop() if os.path.isdir(node.abspath()): nodes.extend([node.make_node(i) for i in node.listdir()]) else: tsk.dep_nodes.append(node) @feature('javac') @before_method('apply_java') def set_dirs(self): self.basedir = self.make_bld_node('jar', '', '') self.outdir = self.basedir @feature('dex') @after_method('apply_java') @before_method('process_source') def dex_files(self): """ Create a dex task. There can be only one dex task by task generator. """ if 'android' in self.env.VALID_PLATFORMS: dexopts = getattr(self, 'dexopts', []) self.outdir.mkdir() destfile = self.outdir.find_or_declare(self.destfile) self.dex_task = tsk = self.create_task('dex', [], [destfile]) tsk.basedir = self.basedir tsk.outdir = self.outdir tsk.cwd = self.outdir.abspath() self.install_files(os.path.join(self.bld.env.PREFIX, self.bld.optim), [destfile]) if getattr(self, 'javac_task', None): tsk.set_run_after(self.javac_task) def build(bld): if Options.options.android_jdk: if not bld.env.env: bld.env.env = dict(os.environ) bld.env.env['JAVA_HOME'] = Options.options.android_jdk bld.env.env['JRE_HOME'] = os.path.join(Options.options.android_jdk, 'jre') bld.recurse('tasks.py') bld.recurse('install.py') @feature('bugengine:multiarch') def apply_multiarch_android(self): pass @feature('cprogram', 'cxxprogram', 'cshlib', 'cxxshlib') @before_method('install_step') @after_method('set_postlink_task') def strip_debug_info(self): if self.env.ENV_PREFIX: self.strip_debug_info_impl() @feature('bugengine:launcher') @after_method('install_step') def install_program_android(self): if 'android' in self.env.VALID_PLATFORMS: if self.env.ENV_PREFIX: #in multiarch, also install the lib self.install_as( os.path.join( self.env.PREFIX, self.bld.optim, self.env.DEPLOY_BINDIR, self.env.cxxprogram_PATTERN % self.target_name ), self.postlink_task.outputs[0], chmod=Utils.O755 )
[]
[]
[]
[]
[]
python
0
0
cli/api/utils.go
// Copyright 2014, The Serviced Authors. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. package api import ( "fmt" "os" "os/exec" "os/user" "path" "strconv" "strings" "github.com/zenoss/glog" "github.com/control-center/serviced/utils" ) const ( minTimeout = 30 defaultTimeout = 600 ) var ( empty interface{} unusedInt int ) // GetAgentIP returns the agent ip address func GetAgentIP() string { if options.Endpoint != "" { return options.Endpoint } agentIP, err := utils.GetIPAddress() if err != nil { panic(err) } return agentIP + ":4979" } // GetDockerDNS returns the docker dns address func GetDockerDNS() []string { if len(options.DockerDNS) > 0 { return options.DockerDNS } dockerdns := os.Getenv("SERVICED_DOCKER_DNS") return strings.Split(dockerdns, ",") } // GetVarPath returns the serviced varpath func GetVarPath() string { if options.VarPath != "" { return options.VarPath } else if home := os.Getenv("SERVICED_HOME"); home != "" { return path.Join(home, "var") } else if user, err := user.Current(); err == nil { return path.Join(os.TempDir(), "serviced-"+user.Username, "var") } return path.Join(os.TempDir(), "serviced") } // GetESStartupTimeout returns the Elastic Search Startup Timeout func GetESStartupTimeout() int { var timeout int if t := options.ESStartupTimeout; t > 0 { timeout = options.ESStartupTimeout } else if t := os.Getenv("ES_STARTUP_TIMEOUT"); t != "" { if res, err := strconv.Atoi(t); err == nil { timeout = res } } if timeout == 0 { timeout = defaultTimeout } else if timeout < minTimeout { timeout = minTimeout } return timeout } // GetGateway returns the default gateway func GetGateway(defaultRPCPort int) string { cmd := exec.Command("ip", "route") output, err := cmd.Output() localhost := URL{"127.0.0.1", defaultRPCPort} if err != nil { glog.V(2).Info("Error checking gateway: ", err) glog.V(1).Info("Could not get gateway using ", localhost.Host) return localhost.String() } for _, line := range strings.Split(string(output), "\n") { fields := strings.Fields(line) if len(fields) > 2 && fields[0] == "default" { endpoint := URL{fields[2], defaultRPCPort} return endpoint.String() } } glog.V(1).Info("No gateway found, using ", localhost.Host) return localhost.String() } type version []int func (a version) String() string { var format = make([]string, len(a)) for idx, value := range a { format[idx] = fmt.Sprintf("%d", value) } return strings.Join(format, ".") } func (a version) Compare(b version) int { astr := "" for _, s := range a { astr += fmt.Sprintf("%12d", s) } bstr := "" for _, s := range b { bstr += fmt.Sprintf("%12d", s) } if astr > bstr { return -1 } else if astr < bstr { return 1 } else { return 0 } }
[ "\"SERVICED_DOCKER_DNS\"", "\"SERVICED_HOME\"", "\"ES_STARTUP_TIMEOUT\"" ]
[]
[ "ES_STARTUP_TIMEOUT", "SERVICED_DOCKER_DNS", "SERVICED_HOME" ]
[]
["ES_STARTUP_TIMEOUT", "SERVICED_DOCKER_DNS", "SERVICED_HOME"]
go
3
0
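The version.Compare method above orders versions by rendering each component into a fixed-width field and comparing the concatenated strings, returning -1 when the receiver is the larger version. The type is unexported, so the following standalone sketch reproduces the same idea rather than importing it:

package main

import (
	"fmt"
	"strings"
)

// compareVersions mirrors the padded-string comparison in version.Compare:
// each component is written into a 12-character field so that lexical
// comparison of the concatenation matches component-wise numeric comparison.
func compareVersions(a, b []int) int {
	pad := func(v []int) string {
		var sb strings.Builder
		for _, n := range v {
			fmt.Fprintf(&sb, "%12d", n)
		}
		return sb.String()
	}
	astr, bstr := pad(a), pad(b)
	switch {
	case astr > bstr:
		return -1 // a is the newer version
	case astr < bstr:
		return 1
	default:
		return 0
	}
}

func main() {
	fmt.Println(compareVersions([]int{1, 10, 0}, []int{1, 9, 3})) // -1
	fmt.Println(compareVersions([]int{1, 2}, []int{1, 2}))        // 0
}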
workshop-resources/cdk/snyk-codesuite-cdk/cdk_stack_deploy/cdk_snyk_codeartifact_stack.py
from pathlib import Path from aws_cdk import ( aws_events as events, aws_events_targets as targets, aws_iam as iam, aws_codepipeline as pipeline, aws_codecommit as codecommit, aws_codebuild as codebuild, aws_codepipeline_actions as cpactions, aws_ssm as ssm, aws_s3 as s3, aws_logs as logs, core ) import os class SnykCodeartifactStack(core.Stack): def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None: super().__init__(scope, construct_id, **kwargs) #### PARAMETERS ### # TODO: Define Stack Parameters here, before sign off. Blank out these parameters # 1. Arn of the CodeCommit repository to be scanned # 2. Trail log bucket name, the name of the bucket to be created for the Codepipeline artifacts # 3. CodeArtifact Repo name, the name of the CA # 4. CodeArtifact Domain name, the domain name of the created CA codecommit_arn = '' artifact_bucket_name = '' codeartifact_repo_name = 'demo-domain' codeartifact_domain_name = 'pypi-store' codecommit_reponame = codecommit_arn.split(':')[5] account = os.environ['CDK_DEFAULT_ACCOUNT'] region = os.environ['CDK_DEFAULT_REGION'] projectname='CodeAritfactDemo' # Artifact Bucket artifact_bucket = s3.Bucket( self, "ArtifactBucket", bucket_name=artifact_bucket_name, block_public_access=s3.BlockPublicAccess( block_public_acls=True, block_public_policy=True, ignore_public_acls=True, restrict_public_buckets=True, ), encryption=s3.BucketEncryption.S3_MANAGED, object_ownership=s3.ObjectOwnership.BUCKET_OWNER_PREFERRED, removal_policy=core.RemovalPolicy.DESTROY, ) # Event Rule snyk_pipeline_rule = events.Rule( self, "SnykPipelineSchedule", description="A daily triggered rule to kick off the artifact scan", enabled=True, schedule=events.Schedule.expression('rate(1 day)') ) snyk_cw_role = iam.Role( self, "snyk_cw_role", assumed_by=iam.ServicePrincipal('events.amazonaws.com') ) snyk_cw_role_policy = iam.Policy( self, 'SnykCWRolePolicy', policy_name = 'cwe-pipeline-execution', document = iam.PolicyDocument( statements=[ iam.PolicyStatement( actions = ["codepipeline:StartPipelineExecution"], # TODO: Reference the pipeline created below resources = ["*"] ) ] ) ) snyk_cw_role.attach_inline_policy(snyk_cw_role_policy) codebuild_log_group = logs.LogGroup( self, 'CodeBuildLogGroup', log_group_name = 'snyk-pypi-ca-logs', retention = logs.RetentionDays('THREE_MONTHS'), removal_policy=core.RemovalPolicy.DESTROY, ) codebuild_service_role = iam.Role( self, "codebuild_service_role", assumed_by=iam.CompositePrincipal( iam.ServicePrincipal('codebuild.amazonaws.com'), iam.ServicePrincipal('codepipeline.amazonaws.com') ) ) codebuild_service_role_policy = iam.Policy( self, 'SnykCodeBuildRolePolicy', policy_name = 'codebuildservicepolicy', document = iam.PolicyDocument( statements=[ iam.PolicyStatement( sid = 'CWLogsPermissions', actions = [ "logs:CreateLogStream", "logs:PutLogEvents" ], resources = [codebuild_log_group.log_group_arn], effect=iam.Effect.ALLOW, ), iam.PolicyStatement( sid = 'CodeCommitActions', actions = [ 'codecommit:GitPull', 'codecommit:GetBranch', 'codecommit:GetCommit', 'codecommit:GetUploadArchiveStatus', 'codecommit:UploadArchive' ], resources = [codecommit_arn], effect=iam.Effect.ALLOW, ), iam.PolicyStatement( sid = 'CodeBuildActions', actions = [ 'ssm:GetParam*', 'codebuild:BatchGetBuilds', "codebuild:StartBuild", 'codebuild:BatchGetBuildBatches', 'codebuild:StartBuildBatch' ], resources = ['*'], effect=iam.Effect.ALLOW, ), iam.PolicyStatement( sid = 'S3Permissions', actions = [ 's3:Get*', 's3:Put*' ], resources=[ 
f"arn:aws:s3:::{artifact_bucket_name}", f"arn:aws:s3:::{artifact_bucket_name}/*", ], effect=iam.Effect.ALLOW, ), iam.PolicyStatement( sid = 'CodeArtifactList', actions = [ 'codeartifact:Describe*', 'codeartifact:Get*', 'codeartifact:List*', 'codeartifact:ReadFromRepository', 'codeartifact:GetAuthorizationToken' ], resources = ['*'], effect=iam.Effect.ALLOW, ), iam.PolicyStatement( sid = 'STStoken', actions = ['sts:GetServiceBearerToken'], resources = ['*'], effect=iam.Effect.ALLOW, conditions={ "StringEqualsIfExists": {"sts:AWSServiceName": "codeartifact.amazonaws.com"} }, ), ] ) ) codebuild_service_role.attach_inline_policy(codebuild_service_role_policy) snyk_build_project= codebuild.PipelineProject( self, 'snykBuild', build_spec= codebuild.BuildSpec.from_object( { "version": '0.2', "env": { "parameter-store":{ "SNYK_TOKEN": 'snykAuthToken', "SNYK_ORG": 'snykOrg' } }, "phases":{ "install":{ "commands":[ "echo 'installing Snyk'", "npm install -g snyk" ] }, "pre_build":{ "commands":[ "echo 'authorizing Snyk'", "snyk config set api=$SNYK_TOKEN", "date=`date +%Y-%m-%d-%H%M%S`", "echo '*** Pulling packages from codeartifact ***'", "python list_repos.py", "echo '*** Updating pip ***'", "pip install --upgrade pip", "pip install --upgrade awscli" ] }, "build":{ "commands":[ "echo '*** Log in to AWS CodeArtifact ***'", "aws codeartifact login --tool pip --repository $repository --domain $domainName --domain-owner $domainOwner", "echo '***** Running pip install *****'", "python pip_install.py", "echo '***** Starting Snyk Security Scan *****'", "snyk monitor --file=requirements.txt --package-manager=pip --org=$SNYK_ORG --project-name=$projectname --skip-unresolved" ] }, "post_build":{ "commands":[ "echo '***** Scan completed, sending requirements to s3 *****'", "aws s3 mv requirements.txt s3://$artifactbucket/outputs/$date/requirements.txt", "aws s3 mv errors.txt s3://$artifactbucket/outputs/$date/errors.txt", "echo '***** Build completed *****'" ] } } } ), environment = codebuild.BuildEnvironment( build_image=codebuild.LinuxBuildImage.AMAZON_LINUX_2_3, compute_type=codebuild.ComputeType.LARGE, environment_variables = { 'domainName': codebuild.BuildEnvironmentVariable( value=codeartifact_domain_name ), 'domainOwner': codebuild.BuildEnvironmentVariable( value=account ), 'repository': codebuild.BuildEnvironmentVariable( value=codeartifact_repo_name ), 'projectname': codebuild.BuildEnvironmentVariable( value=projectname ), 'artifactbucket': codebuild.BuildEnvironmentVariable( value=artifact_bucket_name ), } ), logging = codebuild.LoggingOptions( cloud_watch = codebuild.CloudWatchLoggingOptions( log_group = codebuild_log_group ) ), role = codebuild_service_role ) source_artifact = pipeline.Artifact() snyk_pipeline = pipeline.Pipeline( self, 'snyk_pipeline', stages =[ pipeline.StageProps( stage_name = 'sourcestage', actions=[ cpactions.CodeCommitSourceAction( action_name='codecommit-source', output=source_artifact, repository=codecommit.Repository.from_repository_name(self,'cc_repository',codecommit_reponame), branch='master' ) ] ), pipeline.StageProps( stage_name='build', actions= [ cpactions.CodeBuildAction( action_name='SnykStage', input=source_artifact, project=snyk_build_project, check_secrets_in_plain_text_env_variables = True, run_order = 2 ) ] ) ], pipeline_name = "SnykCodeArtifactPipeline" )
[]
[]
[ "CDK_DEFAULT_ACCOUNT", "CDK_DEFAULT_REGION" ]
[]
["CDK_DEFAULT_ACCOUNT", "CDK_DEFAULT_REGION"]
python
2
0
storage/s3.go
package storage import ( "context" "crypto/tls" "errors" "fmt" "io" "net/http" urlpkg "net/url" "os" "strconv" "strings" "sync" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3/s3iface" "github.com/aws/aws-sdk-go/service/s3/s3manager" "github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface" "github.com/peak/s5cmd/log" "github.com/peak/s5cmd/storage/url" ) var sentinelURL = urlpkg.URL{} const ( // deleteObjectsMax is the max allowed objects to be deleted on single HTTP // request. deleteObjectsMax = 1000 // Amazon Accelerated Transfer endpoint transferAccelEndpoint = "s3-accelerate.amazonaws.com" // Google Cloud Storage endpoint gcsEndpoint = "storage.googleapis.com" ) // Re-used AWS sessions dramatically improve performance. var globalSessionCache = &SessionCache{ sessions: map[Options]*session.Session{}, } // S3 is a storage type which interacts with S3API, DownloaderAPI and // UploaderAPI. type S3 struct { api s3iface.S3API downloader s3manageriface.DownloaderAPI uploader s3manageriface.UploaderAPI endpointURL urlpkg.URL dryRun bool } func parseEndpoint(endpoint string) (urlpkg.URL, error) { if endpoint == "" { return sentinelURL, nil } // add a scheme to correctly parse the endpoint. Without a scheme, // url.Parse will put the host information in path" if !strings.HasPrefix(endpoint, "http") { endpoint = "http://" + endpoint } u, err := urlpkg.Parse(endpoint) if err != nil { return sentinelURL, fmt.Errorf("parse endpoint %q: %v", endpoint, err) } return *u, nil } // NewS3Storage creates new S3 session. func newS3Storage(ctx context.Context, opts Options) (*S3, error) { endpointURL, err := parseEndpoint(opts.Endpoint) if err != nil { return nil, err } awsSession, err := globalSessionCache.newSession(ctx, opts) if err != nil { return nil, err } return &S3{ api: s3.New(awsSession), downloader: s3manager.NewDownloader(awsSession), uploader: s3manager.NewUploader(awsSession), endpointURL: endpointURL, dryRun: opts.DryRun, }, nil } // Stat retrieves metadata from S3 object without returning the object itself. func (s *S3) Stat(ctx context.Context, url *url.URL) (*Object, error) { output, err := s.api.HeadObjectWithContext(ctx, &s3.HeadObjectInput{ Bucket: aws.String(url.Bucket), Key: aws.String(url.Path), }) if err != nil { if errHasCode(err, "NotFound") { return nil, ErrGivenObjectNotFound } return nil, err } etag := aws.StringValue(output.ETag) mod := aws.TimeValue(output.LastModified) return &Object{ URL: url, Etag: strings.Trim(etag, `"`), ModTime: &mod, Size: aws.Int64Value(output.ContentLength), }, nil } // List is a non-blocking S3 list operation which paginates and filters S3 // keys. If no object found or an error is encountered during this period, // it sends these errors to object channel. 
func (s *S3) List(ctx context.Context, url *url.URL, _ bool) <-chan *Object { if isGoogleEndpoint(s.endpointURL) { return s.listObjects(ctx, url) } return s.listObjectsV2(ctx, url) } func (s *S3) listObjectsV2(ctx context.Context, url *url.URL) <-chan *Object { listInput := s3.ListObjectsV2Input{ Bucket: aws.String(url.Bucket), Prefix: aws.String(url.Prefix), } if url.Delimiter != "" { listInput.SetDelimiter(url.Delimiter) } objCh := make(chan *Object) go func() { defer close(objCh) objectFound := false var now time.Time err := s.api.ListObjectsV2PagesWithContext(ctx, &listInput, func(p *s3.ListObjectsV2Output, lastPage bool) bool { for _, c := range p.CommonPrefixes { prefix := aws.StringValue(c.Prefix) if !url.Match(prefix) { continue } newurl := url.Clone() newurl.Path = prefix objCh <- &Object{ URL: newurl, Type: ObjectType{os.ModeDir}, } objectFound = true } // track the instant object iteration began, // so it can be used to bypass objects created after this instant if now.IsZero() { now = time.Now().UTC() } for _, c := range p.Contents { key := aws.StringValue(c.Key) if !url.Match(key) { continue } mod := aws.TimeValue(c.LastModified).UTC() if mod.After(now) { objectFound = true continue } var objtype os.FileMode if strings.HasSuffix(key, "/") { objtype = os.ModeDir } newurl := url.Clone() newurl.Path = aws.StringValue(c.Key) etag := aws.StringValue(c.ETag) objCh <- &Object{ URL: newurl, Etag: strings.Trim(etag, `"`), ModTime: &mod, Type: ObjectType{objtype}, Size: aws.Int64Value(c.Size), StorageClass: StorageClass(aws.StringValue(c.StorageClass)), } objectFound = true } return !lastPage }) if err != nil { objCh <- &Object{Err: err} return } if !objectFound { objCh <- &Object{Err: ErrNoObjectFound} } }() return objCh } // listObjects is used for cloud services that does not support S3 // ListObjectsV2 API. I'm looking at you GCS. func (s *S3) listObjects(ctx context.Context, url *url.URL) <-chan *Object { listInput := s3.ListObjectsInput{ Bucket: aws.String(url.Bucket), Prefix: aws.String(url.Prefix), } if url.Delimiter != "" { listInput.SetDelimiter(url.Delimiter) } objCh := make(chan *Object) go func() { defer close(objCh) objectFound := false var now time.Time err := s.api.ListObjectsPagesWithContext(ctx, &listInput, func(p *s3.ListObjectsOutput, lastPage bool) bool { for _, c := range p.CommonPrefixes { prefix := aws.StringValue(c.Prefix) if !url.Match(prefix) { continue } newurl := url.Clone() newurl.Path = prefix objCh <- &Object{ URL: newurl, Type: ObjectType{os.ModeDir}, } objectFound = true } // track the instant object iteration began, // so it can be used to bypass objects created after this instant if now.IsZero() { now = time.Now().UTC() } for _, c := range p.Contents { key := aws.StringValue(c.Key) if !url.Match(key) { continue } mod := aws.TimeValue(c.LastModified).UTC() if mod.After(now) { objectFound = true continue } var objtype os.FileMode if strings.HasSuffix(key, "/") { objtype = os.ModeDir } newurl := url.Clone() newurl.Path = aws.StringValue(c.Key) etag := aws.StringValue(c.ETag) objCh <- &Object{ URL: newurl, Etag: strings.Trim(etag, `"`), ModTime: &mod, Type: ObjectType{objtype}, Size: aws.Int64Value(c.Size), StorageClass: StorageClass(aws.StringValue(c.StorageClass)), } objectFound = true } return !lastPage }) if err != nil { objCh <- &Object{Err: err} return } if !objectFound { objCh <- &Object{Err: ErrNoObjectFound} } }() return objCh } // Copy is a single-object copy operation which copies objects to S3 // destination from another S3 source. 
func (s *S3) Copy(ctx context.Context, from, to *url.URL, metadata Metadata) error { if s.dryRun { return nil } // SDK expects CopySource like "bucket[/key]" copySource := from.EscapedPath() input := &s3.CopyObjectInput{ Bucket: aws.String(to.Bucket), Key: aws.String(to.Path), CopySource: aws.String(copySource), } storageClass := metadata.StorageClass() if storageClass != "" { input.StorageClass = aws.String(storageClass) } sseEncryption := metadata.SSE() if sseEncryption != "" { input.ServerSideEncryption = aws.String(sseEncryption) sseKmsKeyID := metadata.SSEKeyID() if sseKmsKeyID != "" { input.SSEKMSKeyId = aws.String(sseKmsKeyID) } } acl := metadata.ACL() if acl != "" { input.ACL = aws.String(acl) } _, err := s.api.CopyObject(input) return err } // Read fetches the remote object and returns its contents as an io.ReadCloser. func (s *S3) Read(ctx context.Context, src *url.URL) (io.ReadCloser, error) { resp, err := s.api.GetObjectWithContext(ctx, &s3.GetObjectInput{ Bucket: aws.String(src.Bucket), Key: aws.String(src.Path), }) if err != nil { return nil, err } return resp.Body, nil } // Get is a multipart download operation which downloads S3 objects into any // destination that implements io.WriterAt interface. // Makes a single 'GetObject' call if 'concurrency' is 1 and ignores 'partSize'. func (s *S3) Get( ctx context.Context, from *url.URL, to io.WriterAt, concurrency int, partSize int64, ) (int64, error) { if s.dryRun { return 0, nil } return s.downloader.DownloadWithContext(ctx, to, &s3.GetObjectInput{ Bucket: aws.String(from.Bucket), Key: aws.String(from.Path), }, func(u *s3manager.Downloader) { u.PartSize = partSize u.Concurrency = concurrency }) } // Put is a multipart upload operation to upload resources, which implements // io.Reader interface, into S3 destination. func (s *S3) Put( ctx context.Context, reader io.Reader, to *url.URL, metadata Metadata, concurrency int, partSize int64, ) error { if s.dryRun { return nil } contentType := metadata.ContentType() if contentType == "" { contentType = "application/octet-stream" } input := &s3manager.UploadInput{ Bucket: aws.String(to.Bucket), Key: aws.String(to.Path), Body: reader, ContentType: aws.String(contentType), } storageClass := metadata.StorageClass() if storageClass != "" { input.StorageClass = aws.String(storageClass) } acl := metadata.ACL() if acl != "" { input.ACL = aws.String(acl) } sseEncryption := metadata.SSE() if sseEncryption != "" { input.ServerSideEncryption = aws.String(sseEncryption) sseKmsKeyID := metadata.SSEKeyID() if sseKmsKeyID != "" { input.SSEKMSKeyId = aws.String(sseKmsKeyID) } } _, err := s.uploader.UploadWithContext(ctx, input, func(u *s3manager.Uploader) { u.PartSize = partSize u.Concurrency = concurrency }) return err } // chunk is an object identifier container which is used on MultiDelete // operations. Since DeleteObjects API allows deleting objects up to 1000, // splitting keys into multiple chunks is required. type chunk struct { Bucket string Keys []*s3.ObjectIdentifier } // calculateChunks calculates chunks for given URL channel and returns // read-only chunk channel. 
func (s *S3) calculateChunks(ch <-chan *url.URL) <-chan chunk { chunkch := make(chan chunk) go func() { defer close(chunkch) var keys []*s3.ObjectIdentifier initKeys := func() { keys = make([]*s3.ObjectIdentifier, 0) } var bucket string for url := range ch { bucket = url.Bucket objid := &s3.ObjectIdentifier{Key: aws.String(url.Path)} keys = append(keys, objid) if len(keys) == deleteObjectsMax { chunkch <- chunk{ Bucket: bucket, Keys: keys, } initKeys() } } if len(keys) > 0 { chunkch <- chunk{ Bucket: bucket, Keys: keys, } } }() return chunkch } // Delete is a single object delete operation. func (s *S3) Delete(ctx context.Context, url *url.URL) error { chunk := chunk{ Bucket: url.Bucket, Keys: []*s3.ObjectIdentifier{ {Key: aws.String(url.Path)}, }, } resultch := make(chan *Object, 1) defer close(resultch) s.doDelete(ctx, chunk, resultch) obj := <-resultch return obj.Err } // doDelete deletes the given keys given by chunk. Results are piggybacked via // the Object container. func (s *S3) doDelete(ctx context.Context, chunk chunk, resultch chan *Object) { if s.dryRun { for _, k := range chunk.Keys { key := fmt.Sprintf("s3://%v/%v", chunk.Bucket, aws.StringValue(k.Key)) url, _ := url.New(key) resultch <- &Object{URL: url} } return } bucket := chunk.Bucket o, err := s.api.DeleteObjectsWithContext(ctx, &s3.DeleteObjectsInput{ Bucket: aws.String(bucket), Delete: &s3.Delete{Objects: chunk.Keys}, }) if err != nil { resultch <- &Object{Err: err} return } for _, d := range o.Deleted { key := fmt.Sprintf("s3://%v/%v", bucket, aws.StringValue(d.Key)) url, _ := url.New(key) resultch <- &Object{URL: url} } for _, e := range o.Errors { key := fmt.Sprintf("s3://%v/%v", bucket, aws.StringValue(e.Key)) url, _ := url.New(key) resultch <- &Object{ URL: url, Err: fmt.Errorf(aws.StringValue(e.Message)), } } } // MultiDelete is a asynchronous removal operation for multiple objects. // It reads given url channel, creates multiple chunks and run these // chunks in parallel. Each chunk may have at most 1000 objects since DeleteObjects // API has a limitation. // See: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html. func (s *S3) MultiDelete(ctx context.Context, urlch <-chan *url.URL) <-chan *Object { resultch := make(chan *Object) go func() { sem := make(chan bool, 10) defer close(sem) defer close(resultch) chunks := s.calculateChunks(urlch) var wg sync.WaitGroup for chunk := range chunks { chunk := chunk wg.Add(1) sem <- true go func() { defer wg.Done() s.doDelete(ctx, chunk, resultch) <-sem }() } wg.Wait() }() return resultch } // ListBuckets is a blocking list-operation which gets bucket list and returns // the buckets that match with given prefix. func (s *S3) ListBuckets(ctx context.Context, prefix string) ([]Bucket, error) { o, err := s.api.ListBucketsWithContext(ctx, &s3.ListBucketsInput{}) if err != nil { return nil, err } var buckets []Bucket for _, b := range o.Buckets { bucketName := aws.StringValue(b.Name) if prefix == "" || strings.HasPrefix(bucketName, prefix) { buckets = append(buckets, Bucket{ CreationDate: aws.TimeValue(b.CreationDate), Name: bucketName, }) } } return buckets, nil } // MakeBucket creates an S3 bucket with the given name. func (s *S3) MakeBucket(ctx context.Context, name string) error { if s.dryRun { return nil } _, err := s.api.CreateBucketWithContext(ctx, &s3.CreateBucketInput{ Bucket: aws.String(name), }) return err } // SessionCache holds session.Session according to s3Opts and it synchronizes // access/modification. 
type SessionCache struct { sync.Mutex sessions map[Options]*session.Session } // newSession initializes a new AWS session with region fallback and custom // options. func (sc *SessionCache) newSession(ctx context.Context, opts Options) (*session.Session, error) { sc.Lock() defer sc.Unlock() if sess, ok := sc.sessions[opts]; ok { return sess, nil } awsCfg := aws.NewConfig() endpointURL, err := parseEndpoint(opts.Endpoint) if err != nil { return nil, err } // use virtual-host-style if the endpoint is known to support it, // otherwise use the path-style approach. isVirtualHostStyle := isVirtualHostStyle(endpointURL) useAccelerate := supportsTransferAcceleration(endpointURL) // AWS SDK handles transfer acceleration automatically. Setting the // Endpoint to a transfer acceleration endpoint would cause bucket // operations fail. if useAccelerate { endpointURL = sentinelURL } var httpClient *http.Client if opts.NoVerifySSL { httpClient = insecureHTTPClient } awsCfg = awsCfg. WithEndpoint(endpointURL.String()). WithS3ForcePathStyle(!isVirtualHostStyle). WithS3UseAccelerate(useAccelerate). WithHTTPClient(httpClient) awsCfg.Retryer = newCustomRetryer(opts.MaxRetries) useSharedConfig := session.SharedConfigEnable { // Reverse of what the SDK does: if AWS_SDK_LOAD_CONFIG is 0 (or a // falsy value) disable shared configs loadCfg := os.Getenv("AWS_SDK_LOAD_CONFIG") if loadCfg != "" { if enable, _ := strconv.ParseBool(loadCfg); !enable { useSharedConfig = session.SharedConfigDisable } } } sess, err := session.NewSessionWithOptions( session.Options{ Config: *awsCfg, SharedConfigState: useSharedConfig, }, ) if err != nil { return nil, err } // get region of the bucket and create session accordingly. if the region // is not provided, it means we want region-independent session // for operations such as listing buckets, making a new bucket etc. if err := setSessionRegion(ctx, sess, opts.bucket); err != nil { return nil, err } sc.sessions[opts] = sess return sess, nil } func (sc *SessionCache) clear() { sc.Lock() defer sc.Unlock() sc.sessions = map[Options]*session.Session{} } func setSessionRegion(ctx context.Context, sess *session.Session, bucket string) error { if aws.StringValue(sess.Config.Region) == "" { sess.Config.Region = aws.String(endpoints.UsEast1RegionID) } if bucket == "" { return nil } region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "", func(r *request.Request) { r.Config.Credentials = sess.Config.Credentials }) if err != nil { if errHasCode(err, "NotFound") { return err } // don't deny any request to the service if region auto-fetching // receives an error. Delegate error handling to command execution. err = fmt.Errorf("session: fetching region failed: %v", err) msg := log.ErrorMessage{Err: err.Error()} log.Error(msg) } else { sess.Config.Region = aws.String(region) } return nil } // customRetryer wraps the SDK's built in DefaultRetryer adding additional // error codes. Such as, retry for S3 InternalError code. type customRetryer struct { client.DefaultRetryer } func newCustomRetryer(maxRetries int) *customRetryer { return &customRetryer{ DefaultRetryer: client.DefaultRetryer{ NumMaxRetries: maxRetries, }, } } // ShouldRetry overrides SDK's built in DefaultRetryer, adding custom retry // logics that are not included in the SDK. 
func (c *customRetryer) ShouldRetry(req *request.Request) bool { shouldRetry := errHasCode(req.Error, "InternalError") || errHasCode(req.Error, "RequestTimeTooSkewed") || strings.Contains(req.Error.Error(), "connection reset") if !shouldRetry { shouldRetry = c.DefaultRetryer.ShouldRetry(req) } if shouldRetry && req.Error != nil { err := fmt.Errorf("retryable error: %v", req.Error) msg := log.DebugMessage{Err: err.Error()} log.Debug(msg) } return shouldRetry } var insecureHTTPClient = &http.Client{ Transport: &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, }, } func supportsTransferAcceleration(endpoint urlpkg.URL) bool { return endpoint.Hostname() == transferAccelEndpoint } func isGoogleEndpoint(endpoint urlpkg.URL) bool { return endpoint.Hostname() == gcsEndpoint } // isVirtualHostStyle reports whether the given endpoint supports S3 virtual // host style bucket name resolving. If a custom S3 API compatible endpoint is // given, resolve the bucketname from the URL path. func isVirtualHostStyle(endpoint urlpkg.URL) bool { return endpoint == sentinelURL || supportsTransferAcceleration(endpoint) || isGoogleEndpoint(endpoint) } func errHasCode(err error, code string) bool { if err == nil || code == "" { return false } var awsErr awserr.Error if errors.As(err, &awsErr) { if awsErr.Code() == code { return true } } var multiUploadErr s3manager.MultiUploadFailure if errors.As(err, &multiUploadErr) { return errHasCode(multiUploadErr.OrigErr(), code) } return false } // IsCancelationError reports whether given error is a storage related // cancelation error. func IsCancelationError(err error) bool { return errHasCode(err, request.CanceledErrorCode) }
[ "\"AWS_SDK_LOAD_CONFIG\"" ]
[]
[ "AWS_SDK_LOAD_CONFIG" ]
[]
["AWS_SDK_LOAD_CONFIG"]
go
1
0
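The S3 storage code above caps each DeleteObjects call at 1000 keys and streams chunks to workers. Below is a minimal, self-contained sketch of that batching idea only; the names batchKeys and maxBatch are illustrative and not taken from the source, and the channel/goroutine plumbing of calculateChunks is deliberately omitted.

// Sketch: split object keys into groups of at most 1000, mirroring the
// chunking done in calculateChunks above (assumed helper, not from the source).
package main

import "fmt"

const maxBatch = 1000 // DeleteObjects accepts at most 1000 keys per request

// batchKeys splits keys into slices of at most maxBatch entries.
func batchKeys(keys []string) [][]string {
	var batches [][]string
	for len(keys) > maxBatch {
		batches = append(batches, keys[:maxBatch])
		keys = keys[maxBatch:]
	}
	if len(keys) > 0 {
		batches = append(batches, keys)
	}
	return batches
}

func main() {
	keys := make([]string, 2500)
	for i := range keys {
		keys[i] = fmt.Sprintf("prefix/object-%d", i)
	}
	for i, b := range batchKeys(keys) {
		fmt.Printf("batch %d: %d keys\n", i, len(b)) // 1000, 1000, 500
	}
}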
auth/ldap/ldap.go
/* Copyright (c) 2016 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package ldap import ( "errors" "fmt" "os" "strings" "github.com/vmware/harbor/utils/log" "github.com/vmware/harbor/auth" "github.com/vmware/harbor/dao" "github.com/vmware/harbor/models" "github.com/mqu/openldap" ) // Auth implements Authenticator interface to authenticate against LDAP type Auth struct{} const metaChars = "&|!=~*<>()" // Authenticate checks user's credential against LDAP based on basedn template and LDAP URL, // if the check is successful a dummy record will be inserted into DB, such that this user can // be associated to other entities in the system. func (l *Auth) Authenticate(m models.AuthModel) (*models.User, error) { p := m.Principal for _, c := range metaChars { if strings.ContainsRune(p, c) { return nil, fmt.Errorf("the principal contains meta char: %q", c) } } ldapURL := os.Getenv("LDAP_URL") if ldapURL == "" { return nil, errors.New("Can not get any available LDAP_URL.") } log.Debug("ldapURL:", ldapURL) ldap, err := openldap.Initialize(ldapURL) if err != nil { return nil, err } ldap.SetOption(openldap.LDAP_OPT_PROTOCOL_VERSION, openldap.LDAP_VERSION3) ldapBaseDn := os.Getenv("LDAP_BASE_DN") if ldapBaseDn == "" { return nil, errors.New("Can not get any available LDAP_BASE_DN.") } log.Debug("baseDn:", ldapBaseDn) ldapSearchDn := os.Getenv("LDAP_SEARCH_DN") if ldapSearchDn != "" { log.Debug("Search DN: ", ldapSearchDn) ldapSearchPwd := os.Getenv("LDAP_SEARCH_PWD") err = ldap.Bind(ldapSearchDn, ldapSearchPwd) if err != nil { log.Debug("Bind search dn error", err) return nil, err } } attrName := os.Getenv("LDAP_UID") filter := os.Getenv("LDAP_FILTER") if filter != "" { filter = "(&" + filter + "(" + attrName + "=" + m.Principal + "))" } else { filter = "(" + attrName + "=" + m.Principal + ")" } log.Debug("one or more filter", filter) ldapScope := os.Getenv("LDAP_SCOPE") var scope int if ldapScope == "1" { scope = openldap.LDAP_SCOPE_BASE } else if ldapScope == "2" { scope = openldap.LDAP_SCOPE_ONELEVEL } else { scope = openldap.LDAP_SCOPE_SUBTREE } attributes := []string{"uid", "cn", "mail", "email"} result, err := ldap.SearchAll(ldapBaseDn, scope, filter, attributes) if err != nil { return nil, err } if len(result.Entries()) == 0 { log.Warningf("Not found an entry.") return nil, nil } else if len(result.Entries()) != 1 { log.Warningf("Found more than one entry.") return nil, nil } en := result.Entries()[0] bindDN := en.Dn() log.Debug("found entry:", en) err = ldap.Bind(bindDN, m.Password) if err != nil { log.Debug("Bind user error", err) return nil, err } defer ldap.Close() u := models.User{} for _, attr := range en.Attributes() { val := attr.Values()[0] switch attr.Name() { case "uid": u.Realname = val case "cn": u.Realname = val case "mail": u.Email = val case "email": u.Email = val } } u.Username = m.Principal log.Debug("username:", u.Username, ",email:", u.Email) exist, err := dao.UserExists(u, "username") if err != nil { return nil, err } if exist { currentUser, err := dao.GetUser(u) if err != nil 
{ return nil, err } u.UserID = currentUser.UserID } else { u.Realname = m.Principal u.Password = "12345678AbC" u.Comment = "registered from LDAP." if u.Email == "" { u.Email = u.Username + "@placeholder.com" } userID, err := dao.Register(u) if err != nil { return nil, err } u.UserID = int(userID) } return &u, nil } func init() { auth.Register("ldap_auth", &Auth{}) }
[ "\"LDAP_URL\"", "\"LDAP_BASE_DN\"", "\"LDAP_SEARCH_DN\"", "\"LDAP_SEARCH_PWD\"", "\"LDAP_UID\"", "\"LDAP_FILTER\"", "\"LDAP_SCOPE\"" ]
[]
[ "LDAP_SEARCH_PWD", "LDAP_SCOPE", "LDAP_UID", "LDAP_FILTER", "LDAP_URL", "LDAP_BASE_DN", "LDAP_SEARCH_DN" ]
[]
["LDAP_SEARCH_PWD", "LDAP_SCOPE", "LDAP_UID", "LDAP_FILTER", "LDAP_URL", "LDAP_BASE_DN", "LDAP_SEARCH_DN"]
go
7
0
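The auth/ldap/ldap.go record above builds its search filter from LDAP_FILTER and LDAP_UID after rejecting LDAP metacharacters in the principal. The sketch below isolates that filter-construction step; buildFilter is an assumed helper name, not a function from the source.

// Sketch of the filter construction in Authenticate above: reject metacharacters
// in the principal, then AND an optional extra filter with the uid attribute match.
package main

import (
	"fmt"
	"strings"
)

const metaChars = "&|!=~*<>()"

func buildFilter(extraFilter, uidAttr, principal string) (string, error) {
	for _, c := range metaChars {
		if strings.ContainsRune(principal, c) {
			return "", fmt.Errorf("the principal contains meta char: %q", c)
		}
	}
	if extraFilter != "" {
		return "(&" + extraFilter + "(" + uidAttr + "=" + principal + "))", nil
	}
	return "(" + uidAttr + "=" + principal + ")", nil
}

func main() {
	f, err := buildFilter("(objectClass=person)", "uid", "jdoe")
	fmt.Println(f, err) // (&(objectClass=person)(uid=jdoe)) <nil>
}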
cmd/env_factory.go
package cmd import ( "os" "path/filepath" "time" "github.com/cppforlife/go-patch/patch" bihttpagent "github.com/cloudfoundry/bosh-agent/agentclient/http" biblobstore "github.com/cloudfoundry/bosh-cli/blobstore" bicloud "github.com/cloudfoundry/bosh-cli/cloud" biconfig "github.com/cloudfoundry/bosh-cli/config" bicpirel "github.com/cloudfoundry/bosh-cli/cpi/release" bidepl "github.com/cloudfoundry/bosh-cli/deployment" bidisk "github.com/cloudfoundry/bosh-cli/deployment/disk" biinstance "github.com/cloudfoundry/bosh-cli/deployment/instance" biinstancestate "github.com/cloudfoundry/bosh-cli/deployment/instance/state" bideplmanifest "github.com/cloudfoundry/bosh-cli/deployment/manifest" bideplrel "github.com/cloudfoundry/bosh-cli/deployment/release" bisshtunnel "github.com/cloudfoundry/bosh-cli/deployment/sshtunnel" bidepltpl "github.com/cloudfoundry/bosh-cli/deployment/template" bivm "github.com/cloudfoundry/bosh-cli/deployment/vm" boshtpl "github.com/cloudfoundry/bosh-cli/director/template" biindex "github.com/cloudfoundry/bosh-cli/index" boshinst "github.com/cloudfoundry/bosh-cli/installation" boshinstmanifest "github.com/cloudfoundry/bosh-cli/installation/manifest" bitarball "github.com/cloudfoundry/bosh-cli/installation/tarball" biregistry "github.com/cloudfoundry/bosh-cli/registry" boshrel "github.com/cloudfoundry/bosh-cli/release" birelsetmanifest "github.com/cloudfoundry/bosh-cli/release/set/manifest" bistatepkg "github.com/cloudfoundry/bosh-cli/state/pkg" bistemcell "github.com/cloudfoundry/bosh-cli/stemcell" bitemplate "github.com/cloudfoundry/bosh-cli/templatescompiler" bitemplateerb "github.com/cloudfoundry/bosh-cli/templatescompiler/erbrenderer" "github.com/cloudfoundry/bosh-utils/httpclient" ) type envFactory struct { deps BasicDeps manifestPath string manifestVars boshtpl.Variables manifestOp patch.Op deploymentStateService biconfig.DeploymentStateService installationManifestParser ReleaseSetAndInstallationManifestParser releaseManager boshinst.ReleaseManager releaseFetcher boshinst.ReleaseFetcher stemcellFetcher bistemcell.Fetcher cpiInstaller bicpirel.CpiInstaller targetProvider boshinst.TargetProvider cloudFactory bicloud.Factory diskManagerFactory bidisk.ManagerFactory vmManagerFactory bivm.ManagerFactory stemcellManagerFactory bistemcell.ManagerFactory instanceManagerFactory biinstance.ManagerFactory deploymentManagerFactory bidepl.ManagerFactory agentClientFactory bihttpagent.AgentClientFactory blobstoreFactory biblobstore.Factory deploymentFactory bidepl.Factory deploymentRecord bidepl.Record } func NewEnvFactory( deps BasicDeps, manifestPath string, statePath string, manifestVars boshtpl.Variables, manifestOp patch.Op, recreatePersistentDisks bool, ) *envFactory { f := envFactory{ deps: deps, manifestPath: manifestPath, manifestVars: manifestVars, manifestOp: manifestOp, } f.releaseManager = boshinst.NewReleaseManager(deps.Logger) releaseJobResolver := bideplrel.NewJobResolver(f.releaseManager) // todo expand path? 
workspaceRootPath := filepath.Join(os.Getenv("HOME"), ".bosh") { tarballCacheBasePath := filepath.Join(workspaceRootPath, "downloads") tarballCache := bitarball.NewCache(tarballCacheBasePath, deps.FS, deps.Logger) httpClient := httpclient.NewHTTPClient(httpclient.CreateDefaultClient(nil), deps.Logger) tarballProvider := bitarball.NewProvider( tarballCache, deps.FS, httpClient, 3, 500*time.Millisecond, deps.Logger) releaseProvider := boshrel.NewProvider( deps.CmdRunner, deps.Compressor, deps.DigestCalculator, deps.FS, deps.Logger) f.releaseFetcher = boshinst.NewReleaseFetcher( tarballProvider, releaseProvider.NewExtractingArchiveReader(), f.releaseManager, ) stemcellReader := bistemcell.NewReader(deps.Compressor, deps.FS) stemcellExtractor := bistemcell.NewExtractor(stemcellReader, deps.FS) f.stemcellFetcher = bistemcell.Fetcher{ TarballProvider: tarballProvider, StemcellExtractor: stemcellExtractor, } } f.deploymentStateService = biconfig.NewFileSystemDeploymentStateService( deps.FS, deps.UUIDGen, deps.Logger, biconfig.DeploymentStatePath(manifestPath, statePath)) { registryServer := biregistry.NewServerManager(deps.Logger) installerFactory := boshinst.NewInstallerFactory( deps.UI, deps.CmdRunner, deps.Compressor, releaseJobResolver, deps.UUIDGen, registryServer, deps.Logger, deps.FS, deps.DigestCreationAlgorithms) f.cpiInstaller = bicpirel.CpiInstaller{ ReleaseManager: f.releaseManager, InstallerFactory: installerFactory, Validator: bicpirel.NewValidator(), } } f.targetProvider = boshinst.NewTargetProvider( f.deploymentStateService, deps.UUIDGen, filepath.Join(workspaceRootPath, "installations")) { diskRepo := biconfig.NewDiskRepo(f.deploymentStateService, deps.UUIDGen) stemcellRepo := biconfig.NewStemcellRepo(f.deploymentStateService, deps.UUIDGen) vmRepo := biconfig.NewVMRepo(f.deploymentStateService) f.diskManagerFactory = bidisk.NewManagerFactory(diskRepo, deps.Logger) diskDeployer := bivm.NewDiskDeployer(f.diskManagerFactory, diskRepo, deps.Logger, recreatePersistentDisks) f.stemcellManagerFactory = bistemcell.NewManagerFactory(stemcellRepo) f.vmManagerFactory = bivm.NewManagerFactory( vmRepo, stemcellRepo, diskDeployer, deps.UUIDGen, deps.FS, deps.Logger) deploymentRepo := biconfig.NewDeploymentRepo(f.deploymentStateService) releaseRepo := biconfig.NewReleaseRepo(f.deploymentStateService, deps.UUIDGen) f.deploymentRecord = bidepl.NewRecord(deploymentRepo, releaseRepo, stemcellRepo) } { f.blobstoreFactory = biblobstore.NewBlobstoreFactory(deps.UUIDGen, deps.FS, deps.Logger) f.deploymentFactory = bidepl.NewFactory(10*time.Second, 500*time.Millisecond) f.agentClientFactory = bihttpagent.NewAgentClientFactory(1*time.Second, deps.Logger) f.cloudFactory = bicloud.NewFactory(deps.FS, deps.CmdRunner, deps.Logger) } { erbRenderer := bitemplateerb.NewERBRenderer(deps.FS, deps.CmdRunner, deps.Logger) jobRenderer := bitemplate.NewJobRenderer(erbRenderer, deps.FS, deps.UUIDGen, deps.Logger) builderFactory := biinstancestate.NewBuilderFactory( bistatepkg.NewCompiledPackageRepo(biindex.NewInMemoryIndex()), releaseJobResolver, bitemplate.NewJobListRenderer(jobRenderer, deps.Logger), bitemplate.NewRenderedJobListCompressor(deps.FS, deps.Compressor, deps.DigestCalculator, deps.Logger), deps.Logger, ) sshTunnelFactory := bisshtunnel.NewFactory(deps.Logger) instanceFactory := biinstance.NewFactory(builderFactory) f.instanceManagerFactory = biinstance.NewManagerFactory( sshTunnelFactory, instanceFactory, deps.Logger) } { releaseSetValidator := birelsetmanifest.NewValidator(deps.Logger) releaseSetParser 
:= birelsetmanifest.NewParser(deps.FS, deps.Logger, releaseSetValidator) installValidator := boshinstmanifest.NewValidator(deps.Logger) installParser := boshinstmanifest.NewParser(deps.FS, deps.UUIDGen, deps.Logger, installValidator) f.installationManifestParser = ReleaseSetAndInstallationManifestParser{ ReleaseSetParser: releaseSetParser, InstallationParser: installParser, } } return &f } func (f *envFactory) Preparer() DeploymentPreparer { return NewDeploymentPreparer( f.deps.UI, f.deps.Logger, "DeploymentPreparer", f.deploymentStateService, biconfig.NewLegacyDeploymentStateMigrator( f.deploymentStateService, f.deps.FS, f.deps.UUIDGen, f.deps.Logger, ), f.releaseManager, f.deploymentRecord, f.cloudFactory, f.stemcellManagerFactory, f.agentClientFactory, f.vmManagerFactory, f.blobstoreFactory, bidepl.NewDeployer( f.vmManagerFactory, f.instanceManagerFactory, f.deploymentFactory, f.deps.Logger, ), f.manifestPath, f.manifestVars, f.manifestOp, f.cpiInstaller, f.releaseFetcher, f.stemcellFetcher, f.installationManifestParser, NewDeploymentManifestParser( bideplmanifest.NewParser(f.deps.FS, f.deps.Logger), bideplmanifest.NewValidator(f.deps.Logger), f.releaseManager, bidepltpl.NewDeploymentTemplateFactory(f.deps.FS), ), NewTempRootConfigurator(f.deps.FS), f.targetProvider, ) } func (f *envFactory) Deleter() DeploymentDeleter { return NewDeploymentDeleter( f.deps.UI, "DeploymentDeleter", f.deps.Logger, f.deploymentStateService, f.releaseManager, f.cloudFactory, f.agentClientFactory, f.blobstoreFactory, bidepl.NewManagerFactory( f.vmManagerFactory, f.instanceManagerFactory, f.diskManagerFactory, f.stemcellManagerFactory, f.deploymentFactory, ), f.manifestPath, f.manifestVars, f.manifestOp, f.cpiInstaller, boshinst.NewUninstaller(f.deps.FS, f.deps.Logger), f.releaseFetcher, f.installationManifestParser, NewTempRootConfigurator(f.deps.FS), f.targetProvider, ) } func (f *envFactory) StateManager() DeploymentStateManager { return NewDeploymentStateManager( f.deps.UI, "DeploymentStateManager", f.deps.Logger, f.deploymentStateService, f.agentClientFactory, bidepl.NewManagerFactory( f.vmManagerFactory, f.instanceManagerFactory, f.diskManagerFactory, f.stemcellManagerFactory, f.deploymentFactory, ), f.manifestPath, f.manifestVars, f.manifestOp, f.installationManifestParser, NewDeploymentManifestParser( bideplmanifest.NewParser(f.deps.FS, f.deps.Logger), bideplmanifest.NewValidator(f.deps.Logger), f.releaseManager, bidepltpl.NewDeploymentTemplateFactory(f.deps.FS), ), ) }
[ "\"HOME\"" ]
[]
[ "HOME" ]
[]
["HOME"]
go
1
0
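The cmd/env_factory.go record above derives its workspace layout from HOME: a ~/.bosh root with "downloads" and "installations" subdirectories. The sketch below shows just that path derivation; the fallback to os.UserHomeDir when HOME is unset is an added assumption and not in the source.

// Sketch of the workspace layout used in NewEnvFactory above.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func workspacePaths() (root, downloads, installations string) {
	home := os.Getenv("HOME")
	if home == "" {
		home, _ = os.UserHomeDir() // assumption: fall back when HOME is unset
	}
	root = filepath.Join(home, ".bosh")
	return root, filepath.Join(root, "downloads"), filepath.Join(root, "installations")
}

func main() {
	root, dl, inst := workspacePaths()
	fmt.Println(root, dl, inst)
}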
components/compliance-service/compliance.go
package compliance import ( "context" "fmt" "io" "net" "net/http" "os" "strconv" "time" "github.com/pkg/errors" "github.com/chef/automate/api/external/secrets" "github.com/chef/automate/api/interservice/authn" "github.com/chef/automate/api/interservice/authz" "github.com/chef/automate/api/interservice/compliance/ingest/ingest" "github.com/chef/automate/api/interservice/compliance/jobs" "github.com/chef/automate/api/interservice/compliance/profiles" "github.com/chef/automate/api/interservice/compliance/reporting" "github.com/chef/automate/api/interservice/compliance/stats" "github.com/chef/automate/api/interservice/compliance/status" "github.com/chef/automate/api/interservice/compliance/version" "github.com/chef/automate/api/interservice/data_lifecycle" "github.com/chef/automate/api/interservice/es_sidecar" "github.com/chef/automate/api/interservice/event" aEvent "github.com/chef/automate/api/interservice/event" "github.com/chef/automate/api/interservice/nodemanager/manager" "github.com/chef/automate/api/interservice/nodemanager/nodes" jobsserver "github.com/chef/automate/components/compliance-service/api/jobs/server" profilesserver "github.com/chef/automate/components/compliance-service/api/profiles/server" reportingserver "github.com/chef/automate/components/compliance-service/api/reporting/server" statsserver "github.com/chef/automate/components/compliance-service/api/stats/server" statusserver "github.com/chef/automate/components/compliance-service/api/status/server" versionserver "github.com/chef/automate/components/compliance-service/api/version/server" "github.com/chef/automate/components/compliance-service/config" "github.com/chef/automate/components/compliance-service/dao/pgdb" "github.com/chef/automate/components/compliance-service/ingest/ingestic" "github.com/chef/automate/components/compliance-service/ingest/ingestic/mappings" ingestserver "github.com/chef/automate/components/compliance-service/ingest/server" "github.com/chef/automate/components/compliance-service/inspec" "github.com/chef/automate/components/compliance-service/inspec-agent/remote" "github.com/chef/automate/components/compliance-service/inspec-agent/resolver" "github.com/chef/automate/components/compliance-service/inspec-agent/runner" "github.com/chef/automate/components/compliance-service/inspec-agent/scheduler" "github.com/chef/automate/components/compliance-service/reporting/relaxting" "github.com/chef/automate/components/compliance-service/scanner" "github.com/chef/automate/components/compliance-service/utils/logging" notifications "github.com/chef/automate/components/notifications-client/api" "github.com/chef/automate/components/notifications-client/notifier" project_update_lib "github.com/chef/automate/lib/authz" "github.com/chef/automate/lib/cereal" "github.com/chef/automate/lib/cereal/postgres" "github.com/chef/automate/lib/datalifecycle/purge" "github.com/chef/automate/lib/grpc/secureconn" "github.com/chef/automate/lib/tracing" "github.com/golang/mock/gomock" "github.com/golang/protobuf/ptypes/empty" "github.com/sirupsen/logrus" "github.com/teambition/rrule-go" "google.golang.org/grpc" "google.golang.org/grpc/reflection" ) type serviceState int const ( serviceStateUnknown = iota serviceStateStarting serviceStateStarted ) var SERVICE_STATE serviceState var ( PurgeWorkflowName = cereal.NewWorkflowName("purge") PurgeScheduleName = "periodic_purge" ) func createESBackend(servConf *config.Compliance) relaxting.ES2Backend { // define the ElasticSearch backend config with legacy automate auth esr := 
relaxting.ES2Backend{ ESUrl: servConf.ElasticSearch.Url, Enterprise: servConf.Delivery.Enterprise, ChefDeliveryUser: servConf.Delivery.User, ChefDeliveryToken: servConf.Delivery.Token, } return esr } func createPGBackend(conf *config.Postgres) (*pgdb.DB, error) { // define the Postgres Scanner backend return pgdb.New(conf) } // here we execute migrations, create the es and pg backends, read certs, set up the needed env vars, // and modify config info func initBits(ctx context.Context, conf *config.Compliance) (db *pgdb.DB, connFactory *secureconn.Factory, esr relaxting.ES2Backend, statusSrv *statusserver.Server, err error) { statusSrv = statusserver.New() statusserver.AddMigrationUpdate(statusSrv, statusserver.MigrationLabelPG, "Initializing DB connection and schema migration...") // start pg backend db, err = createPGBackend(&conf.Postgres) if err != nil { statusserver.AddMigrationUpdate(statusSrv, statusserver.MigrationLabelPG, err.Error()) statusserver.AddMigrationUpdate(statusSrv, statusserver.MigrationLabelPG, statusserver.MigrationFailedMsg) return db, connFactory, esr, statusSrv, errors.Wrap(err, "createPGBackend failed") } statusserver.AddMigrationUpdate(statusSrv, statusserver.MigrationLabelPG, statusserver.MigrationCompletedMsg) // create esconfig info backend esr = createESBackend(conf) backendCacheBool, err := strconv.ParseBool(conf.InspecAgent.BackendCache) if err != nil { logrus.Errorf("Unable to parse value for inspec agent backend cache configuration, %s - Valid configuration options are 'true' and 'false' ", conf.InspecAgent.BackendCache) inspec.BackendCache = true } else { inspec.BackendCache = backendCacheBool } inspec.ResultMessageLimit = conf.InspecAgent.ResultMessageLimit runner.ControlResultsLimit = conf.InspecAgent.ControlResultsLimit runner.RunTimeLimit = conf.InspecAgent.RunTimeLimit inspec.TmpDir = conf.InspecAgent.TmpDir // Let's have something sensible if the temp dir is not specified if inspec.TmpDir == "" { inspec.TmpDir = "/tmp" } err = os.Setenv("TMPDIR", inspec.TmpDir) if err != nil { return db, connFactory, esr, statusSrv, errors.Wrap(err, "Unable to set TMPDIR env variable") } serviceCerts, err := conf.Service.ReadCerts() if err != nil { return db, connFactory, esr, statusSrv, errors.Wrap(err, "Unable to load service certificates") } connFactory = secureconn.NewFactory(*serviceCerts) conf.Secrets.Endpoint = fmt.Sprintf("%s:%d", conf.Secrets.HostBind, conf.Secrets.Port) conf.Authz.Endpoint = fmt.Sprintf("%s:%d", conf.Authz.HostBind, conf.Authz.Port) conf.Manager.Endpoint = fmt.Sprintf("%s:%d", conf.Manager.Host, conf.Manager.Port) conf.Service.Endpoint = fmt.Sprintf("%s:%d", conf.Service.HostBind, conf.Service.Port) return db, connFactory, esr, statusSrv, nil } // register all the services, start the grpc server, and call setup func serveGrpc(ctx context.Context, db *pgdb.DB, connFactory *secureconn.Factory, esr relaxting.ES2Backend, conf config.Compliance, binding string, statusSrv *statusserver.Server, cerealManager *cereal.Manager) { lis, err := net.Listen("tcp", binding) if err != nil { logrus.Fatalf("failed to listen: %v", err) } esClient, err := esr.ES2Client() if err != nil { logrus.Fatalf("could not connect to elasticsearch: %v", err) } var authzProjectsClient authz.ProjectsClient eventClient := getEventConnection(connFactory, conf.EventConfig.Endpoint) notifier := getNotificationsConnection(connFactory, conf.Notifications.Target) if os.Getenv("RUN_MODE") != "test" { authzProjectsClient = createAuthzProjectsClient(connFactory, conf.Authz.Endpoint) 
} else { logrus.Infof("not getting authz client; env var RUN_MODE found. value is 'test' ") } nodeManagerServiceClient := getManagerConnection(connFactory, conf.Manager.Endpoint) ingesticESClient := ingestic.NewESClient(esClient) ingesticESClient.InitializeStore(context.Background()) runner.ESClient = ingesticESClient s := connFactory.NewServer(tracing.GlobalServerInterceptor()) if os.Getenv("RUN_MODE") == "test" { logrus.Warn(`Skipping project-update manager setup due to RUN_MODE env var being set to "test"`) } else { cerealProjectUpdateManager, err := createProjectUpdateCerealManager(connFactory, conf.CerealConfig.Endpoint) if err != nil { logrus.WithError(err).Fatal("could not create cereal manager") } err = project_update_lib.RegisterTaskExecutors(cerealProjectUpdateManager, "compliance", ingesticESClient, authzProjectsClient) if err != nil { logrus.WithError(err).Fatal("could not register project update task executors") } err = project_update_lib.RegisterSerialTaskExecutors(cerealProjectUpdateManager, "compliance", ingesticESClient, authzProjectsClient) if err != nil { logrus.WithError(err).Fatal("could not register project update task executors") } if err := cerealProjectUpdateManager.Start(ctx); err != nil { logrus.WithError(err).Fatal("could not start cereal manager") } } // needs to be the first one, since it creates the es indices ingest.RegisterComplianceIngesterServer(s, ingestserver.NewComplianceIngestServer(ingesticESClient, nodeManagerServiceClient, conf.InspecAgent.AutomateFQDN, notifier, authzProjectsClient, conf.Service.MessageBufferSize)) jobs.RegisterJobsServiceServer(s, jobsserver.New(db, connFactory, eventClient, conf.Manager.Endpoint, cerealManager)) reporting.RegisterReportingServiceServer(s, reportingserver.New(&esr)) ps := profilesserver.New(db, &esr, ingesticESClient, &conf.Profiles, eventClient, statusSrv) profiles.RegisterProfilesServiceServer(s, ps) profiles.RegisterProfilesAdminServiceServer(s, ps) stats.RegisterStatsServiceServer(s, statsserver.New(&esr)) version.RegisterVersionServiceServer(s, versionserver.New()) status.RegisterComplianceStatusServer(s, statusSrv) if os.Getenv("RUN_MODE") == "test" { logrus.Warn(`Skipping data-lifecycle setup due to RUN_MODE env var being set to "test"`) } else { purgeServer, err := setupDataLifecyclePurgeInterface(ctx, connFactory, conf, cerealManager) if err != nil { logrus.Fatalf("serveGrpc aborting, can't setup purge server: %s", err) } data_lifecycle.RegisterPurgeServer(s, purgeServer) } // Register reflection service on gRPC server. reflection.Register(s) logrus.Info("Starting GRPC server on " + binding) // running ElasticSearch migration err = relaxting.RunMigrations(esr, statusSrv) if err != nil { logrus.Fatalf("serveGrpc aborting, unable to run migrations: %v", err) } errc := make(chan error) defer close(errc) go func() { err := s.Serve(lis) errc <- errors.Wrap(err, "Serve") }() // `setup` depends on `Serve` because it dials back to the compliance-service itself. 
// For this to work we launch `Serve` in a goroutine and connect WithBlock to itself and other dependent services from `setup` // A connect timeout is used to ensure error reporting in the event of failures to connect err = setup(ctx, connFactory, conf, esr, db, cerealManager) if err != nil { logrus.Fatalf("serveGrpc aborting, we have a problem, setup failed: %s", err.Error()) } // block here: there's at most one error we care about err = <-errc // if we reach this, we've had an issue in Serve() logrus.Fatalf("serveGrpc aborting, we have a problem: %s", err.Error()) } func createProjectUpdateCerealManager(connFactory *secureconn.Factory, address string) (*cereal.Manager, error) { conn, err := connFactory.Dial("cereal-service", address) if err != nil { return nil, errors.Wrap(err, "error dialing cereal service") } grpcBackend := project_update_lib.ProjectUpdateBackend(conn) manager, err := cereal.NewManager(grpcBackend) if err != nil { grpcBackend.Close() // nolint: errcheck return nil, err } return manager, nil } func getEventConnection(connectionFactory *secureconn.Factory, eventEndpoint string) aEvent.EventServiceClient { if eventEndpoint == "" || eventEndpoint == ":0" { if os.Getenv("RUN_MODE") == "test" { logrus.Infof("using mock Event service Client") eventServiceClientMock := event.NewMockEventServiceClient(gomock.NewController(nil)) eventServiceClientMock.EXPECT().Publish(gomock.Any(), gomock.Any()).AnyTimes().Return( &event.PublishResponse{}, nil) return eventServiceClientMock } logrus.Fatalf("eventEndpoint cannot be empty or Dial will get stuck") } logrus.Debugf("Connecting to event-service %q", eventEndpoint) timeoutCtx, cancel := context.WithTimeout(context.Background(), 60*time.Second) defer cancel() conn, err := connectionFactory.DialContext(timeoutCtx, "event-service", eventEndpoint, grpc.WithBlock()) if err != nil { logrus.Fatalf("compliance setup, error grpc dialing to event-service aborting...") } // get event client eventClient := aEvent.NewEventServiceClient(conn) if eventClient == nil { logrus.Fatalf("compliance setup, could not obtain automate events service client: %s", err) } return eventClient } func getNotificationsConnection(connectionFactory *secureconn.Factory, notificationsEndpoint string) notifier.Notifier { if notificationsEndpoint == "" || notificationsEndpoint == ":0" { if os.Getenv("RUN_MODE") == "test" { logrus.Infof("using mock Notifications") return &NotifierMock{} } logrus.Fatalf("notificationsEndpoint cannot be empty or Dial will get stuck") } logrus.Debugf("Connecting to notifications-service %q", notificationsEndpoint) timeoutCtx, cancel := context.WithTimeout(context.Background(), 60*time.Second) defer cancel() conn, err := connectionFactory.DialContext(timeoutCtx, "notifications-service", notificationsEndpoint, grpc.WithBlock()) if err != nil { logrus.Fatalf("getNotificationsConnection, error grpc dialing to manager %s", err.Error()) } notifier := notifier.New(conn) if notifier == nil { logrus.Fatalf("compliance setup, could not obtain notification client: %s", err) } return notifier } func createAuthzProjectsClient(connectionFactory *secureconn.Factory, authzEndpoint string) authz.ProjectsClient { if authzEndpoint == "" || authzEndpoint == ":0" { logrus.Fatal("authzEndpoint cannot be empty or Dial will get stuck") } logrus.Debugf("Connecting to authz-service %q", authzEndpoint) timeoutCtx, cancel := context.WithTimeout(context.Background(), 60*time.Second) defer cancel() conn, err := connectionFactory.DialContext(timeoutCtx, "authz-service", 
authzEndpoint, grpc.WithBlock()) if err != nil { logrus.Fatalf("getAuthzConnection, error grpc dialing to Authz %s", err.Error()) } authzProjectsClient := authz.NewProjectsClient(conn) if authzProjectsClient == nil { logrus.Fatalf("getAuthzConnection got nil for NewProjectsClient") } return authzProjectsClient } func getManagerConnection(connectionFactory *secureconn.Factory, managerEndpoint string) manager.NodeManagerServiceClient { if managerEndpoint == "" || managerEndpoint == ":0" { if os.Getenv("RUN_MODE") == "test" { logrus.Infof("using mock NodeManagerMock") return &NodeManagerMock{} } logrus.Fatal("managerEndpoint cannot be empty or Dial will get stuck") } logrus.Debugf("Connecting to nodemanager-service %q", managerEndpoint) timeoutCtx, cancel := context.WithTimeout(context.Background(), 60*time.Second) defer cancel() conn, err := connectionFactory.DialContext(timeoutCtx, "nodemanager-service", managerEndpoint, grpc.WithBlock()) if err != nil { logrus.Fatalf("getManagerConnection, error grpc dialing to manager %s", err.Error()) } mgrClient := manager.NewNodeManagerServiceClient(conn) if mgrClient == nil { logrus.Fatalf("getManagerConnection got nil for NewNodeManagerServiceClient") } return mgrClient } func setupDataLifecyclePurgeInterface(ctx context.Context, connFactory *secureconn.Factory, conf config.Compliance, cerealManager *cereal.Manager) (*purge.Server, error) { var ( compSIndex = fmt.Sprintf("comp-%s-s", mappings.ComplianceCurrentTimeSeriesIndicesVersion) compSName = "compliance-scans" compRIndex = fmt.Sprintf("comp-%s-r", mappings.ComplianceCurrentTimeSeriesIndicesVersion) compRName = "compliance-reports" defaultPurgePolicies = &purge.Policies{ Es: map[string]purge.EsPolicy{ compSName: { Name: compSName, IndexName: compSIndex, OlderThanDays: conf.ComplianceReportDays, }, compRName: { Name: compRName, IndexName: compRIndex, OlderThanDays: conf.ComplianceReportDays, }, }, } err error esSidecarConn *grpc.ClientConn esSidecarClient es_sidecar.EsSidecarClient recurrence *rrule.RRule ) // Migrate default policy values from the config. The default policies are // only persisted the first time the workflow is created, after which only // new default policies are added and/or existing policies indices are // updated in case they have been migrated. 
for i, p := range defaultPurgePolicies.Es { if conf.ComplianceReportDays < 0 { p.Disabled = true } p.OlderThanDays = conf.ComplianceReportDays defaultPurgePolicies.Es[i] = p } timeoutCtx, cancel := context.WithTimeout(ctx, 60*time.Second) defer cancel() addr := conf.ElasticSearchSidecar.Address logrus.WithField("address", addr).Info("Connecting to Elasticsearch Sidecar") esSidecarConn, err = connFactory.DialContext(timeoutCtx, "es-sidecar-service", addr, grpc.WithBlock()) if err != nil || esSidecarConn == nil { logrus.WithFields(logrus.Fields{"error": err}).Fatal("Failed to create ES Sidecar connection") return nil, err } esSidecarClient = es_sidecar.NewEsSidecarClient(esSidecarConn) err = purge.ConfigureManager( cerealManager, PurgeWorkflowName, purge.WithTaskEsSidecarClient(esSidecarClient), ) if err != nil { return nil, errors.Wrapf(err, "failed to configure %s workflow", PurgeWorkflowName) } recurrence, err = rrule.NewRRule(rrule.ROption{ Freq: rrule.DAILY, Interval: 1, Dtstart: time.Now(), }) if err != nil { return nil, errors.Wrapf(err, "could not create recurrence rule for %s", PurgeScheduleName) } err = purge.CreateOrUpdatePurgeWorkflow( timeoutCtx, cerealManager, PurgeScheduleName, PurgeWorkflowName, defaultPurgePolicies, true, recurrence, ) if err != nil { return nil, errors.Wrap(err, "failed to create or update purge workflow schedule") } return purge.NewServer( cerealManager, PurgeScheduleName, PurgeWorkflowName, defaultPurgePolicies, purge.WithServerEsSidecarClient(esSidecarClient), ) } func setup(ctx context.Context, connFactory *secureconn.Factory, conf config.Compliance, esr relaxting.ES2Backend, db *pgdb.DB, cerealManager *cereal.Manager) error { var err error var conn, mgrConn, secretsConn, authnConn, authzConn *grpc.ClientConn timeoutCtx, cancel := context.WithTimeout(ctx, 15*time.Second) defer cancel() // get compliance connection for ingest logrus.Debugf("compliance setup, dialing compliance-service manager(%s)", conf.Service.Endpoint) conn, err = connFactory.DialContext(timeoutCtx, "compliance-service", conf.Service.Endpoint, grpc.WithBlock()) if err != nil || conn == nil { err = errors.New("compliance setup, error grpc dialing to compliance-service...") return err } // get ingest client logrus.Debugf("compliance setup, getting an ingest client") ingestClient := ingest.NewComplianceIngesterClient(conn) if ingestClient == nil { return fmt.Errorf("compliance setup, got nil for NewComplianceIngesterClient") } // get nodemanager connection logrus.Debugf("compliance setup, dialing nodemanager(%s)", conf.Manager.Endpoint) mgrConn, err = connFactory.DialContext(timeoutCtx, "nodemanager-service", conf.Manager.Endpoint, grpc.WithBlock()) if err != nil || mgrConn == nil { err = errors.New("compliance setup, error grpc dialing to manager aborting...") return err } // get nodemanager client logrus.Debugf("compliance setup, getting a node manager client") mgrClient := manager.NewNodeManagerServiceClient(mgrConn) if mgrClient == nil { return fmt.Errorf("compliance setup, got nil for NewNodeManagerServiceClient") } // get nodes client logrus.Debugf("compliance setup, getting a nodes client") nodesClient := nodes.NewNodesServiceClient(mgrConn) if nodesClient == nil { return fmt.Errorf("compliance setup, got nil for NewNodesServiceClient") } // get secrets connection logrus.Debugf("compliance setup, dialing secrets-service(%s)", conf.Secrets.Endpoint) secretsConn, err = connFactory.DialContext(timeoutCtx, "secrets-service", conf.Secrets.Endpoint, grpc.WithBlock()) if err != nil { 
return fmt.Errorf("compliance setup, error grpc dialing to secrets") } // get secrets client logrus.Debugf("compliance setup, getting a secrets service client") secretsClient := secrets.NewSecretsServiceClient(secretsConn) if secretsClient == nil { return fmt.Errorf("compliance setup, could not obtain secrets service client") } // set up the scanner, scheduler, and runner servers with needed clients // these are all inspec-agent packages scanner := scanner.New(mgrClient, nodesClient, db) resolver := resolver.New(mgrClient, nodesClient, db, secretsClient) err = runner.InitCerealManager(cerealManager, conf.InspecAgent.JobWorkers, ingestClient, scanner, resolver, conf.RemoteInspecVersion) if err != nil { return errors.Wrap(err, "failed to initialize cereal manager") } err = cerealManager.Start(ctx) if err != nil { return errors.Wrap(err, "failed to start cereal manager") } schedulerServer := scheduler.New(scanner, cerealManager) // start polling for jobs with a recurrence schedule that are due to run. // this function will sleep for one minute, then query the db for all jobs // with recurrence and check if it's time to run the job go schedulerServer.PollForJobs(ctx) if os.Getenv("RUN_MODE") == "test" { logrus.Infof(`Skipping AUTHN client setup due to RUN_MODE env var being set to "test"`) } else { // get the authn-service connection logrus.Debugf("compliance setup, dialing authn-service(%s)", conf.InspecAgent.AuthnTarget) authnConn, err = connFactory.DialContext(timeoutCtx, "authn-service", conf.InspecAgent.AuthnTarget, grpc.WithBlock()) if err != nil || authnConn == nil { err = errors.New("compliance setup, error grpc dialing to authn aborting...") return err } // get the authn client authnClient := authn.NewTokensMgmtClient(authnConn) if authnClient == nil { logrus.Errorf("serveGrpc got nil for NewTokensMgmtClient: %s", err) return err } // get authz connection authzConn, err = connFactory.DialContext(timeoutCtx, "authz-service", fmt.Sprintf("%s:%d", conf.Authz.HostBind, conf.Authz.Port), grpc.WithBlock()) if err != nil || authzConn == nil { err = errors.New("compliance setup, error grpc dialing to authz aborting...") return err } // get the authz client authzClient := authz.NewPoliciesClient(authzConn) if authzClient == nil { logrus.Errorf("serveGrpc got nil for NewPoliciesClient: %s", err) return err } // in order to execute scan jobs remotely (i.e. 
on a different server, reporting back out // to automate), we need access to the auth client for a token and the automate fqdn for reporting remote.RemoteJobInfo = remote.RemoteJob{ PoliciesClient: authzClient, TokensMgmtClient: authnClient, AutomateFQDN: conf.InspecAgent.AutomateFQDN, } } SERVICE_STATE = serviceStateStarted return nil } // Serve grpc func Serve(conf config.Compliance, grpcBinding string) error { SERVICE_STATE = serviceStateUnknown logging.SetLogLevel(conf.Service.LogLevel) ctx := context.Background() db, connFactory, esr, statusSrv, err := initBits(ctx, &conf) if err != nil { return err } SERVICE_STATE = serviceStateStarting cerealManager, err := cereal.NewManager(postgres.NewPostgresBackend(conf.Postgres.ConnectionString)) if err != nil { return err } defer func() { err := cerealManager.Stop() if err != nil { logrus.WithError(err).Error("could not stop cereal manager") } }() go serveGrpc(ctx, db, connFactory, esr, conf, grpcBinding, statusSrv, cerealManager) // nolint: errcheck cfg := NewServiceConfig(&conf, connFactory) return cfg.serveCustomRoutes() } // ServiceInfo holds service listen info type ServiceInfo struct { HostBind string Port int ServerBind string connFactory *secureconn.Factory } //TODO(jaym) If these don't get exposed in the gateway, we need to provide the http server certs // this custom route is used by the inspec-agent scanner to retrieve profile tars for scan execution func (conf *ServiceInfo) serveCustomRoutes() error { conf.ServerBind = fmt.Sprintf("%s:%d", conf.HostBind, conf.Port) serveAddress := fmt.Sprintf("127.0.0.1:%d", 2133) // Similarly hard-coded in inspec-agent _, cancel := context.WithCancel(context.Background()) defer cancel() r := http.NewServeMux() r.HandleFunc("/profiles/tar", conf.ProfileTarHandler) return http.ListenAndServe(serveAddress, r) } // NewServiceConfig returns a ServiceInfo instance func NewServiceConfig(cfg *config.Compliance, connFactory *secureconn.Factory) *ServiceInfo { return &ServiceInfo{ HostBind: cfg.Service.HostBind, Port: cfg.Service.Port, connFactory: connFactory, } } // ProfileTarHandler is the http handler for profile tarballs, used by the inspec-agent // for executing scans func (conf *ServiceInfo) ProfileTarHandler(w http.ResponseWriter, r *http.Request) { if err := r.ParseForm(); err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return } profileName := r.Form.Get("name") profileVersion := r.Form.Get("version") profileOwner := r.Form.Get("owner") conn, err := conf.connFactory.Dial("compliance-service", conf.ServerBind) if err != nil { msg := fmt.Sprintf("grpc service unavailable %s", conf.ServerBind) http.Error(w, msg, http.StatusServiceUnavailable) return } defer conn.Close() // nolint: errcheck profilesClient := profiles.NewProfilesServiceClient(conn) stream, err := profilesClient.ReadTar(context.Background(), &profiles.ProfileDetails{Name: profileName, Version: profileVersion, Owner: profileOwner}) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } for { data, err := stream.Recv() if err == io.EOF { break } if err != nil { http.Error(w, err.Error(), http.StatusNotFound) return } contentLength := strconv.Itoa(len(data.GetData())) w.Header().Set("Content-Length", contentLength) w.Header().Set("Content-Type", "application/x-gzip") w.Header().Set("Accept-Ranges", "bytes") w.Write(data.GetData()) // nolint: errcheck } } type NotifierMock struct { } func (n *NotifierMock) Send(context.Context, *notifications.Event) { } func (n *NotifierMock) QueueSize() int { 
return 0 } type NodeManagerMock struct { } func (nm *NodeManagerMock) Create(ctx context.Context, in *manager.NodeManager, opts ...grpc.CallOption) (*manager.Ids, error) { return &manager.Ids{}, nil } func (nm *NodeManagerMock) Read(ctx context.Context, in *manager.Id, opts ...grpc.CallOption) (*manager.NodeManager, error) { return &manager.NodeManager{}, nil } func (nm *NodeManagerMock) Update(ctx context.Context, in *manager.NodeManager, opts ...grpc.CallOption) (*empty.Empty, error) { return &empty.Empty{}, nil } func (nm *NodeManagerMock) Delete(ctx context.Context, in *manager.Id, opts ...grpc.CallOption) (*empty.Empty, error) { return &empty.Empty{}, nil } func (nm *NodeManagerMock) DeleteWithNodes(ctx context.Context, in *manager.Id, opts ...grpc.CallOption) (*manager.Ids, error) { return &manager.Ids{}, nil } func (nm *NodeManagerMock) DeleteWithNodeStateStopped(ctx context.Context, in *manager.Id, opts ...grpc.CallOption) (*empty.Empty, error) { return &empty.Empty{}, nil } func (nm *NodeManagerMock) DeleteWithNodeStateTerminated(ctx context.Context, in *manager.Id, opts ...grpc.CallOption) (*empty.Empty, error) { return &empty.Empty{}, nil } func (nm *NodeManagerMock) List(ctx context.Context, in *manager.Query, opts ...grpc.CallOption) (*manager.NodeManagers, error) { return &manager.NodeManagers{}, nil } func (nm *NodeManagerMock) Connect(ctx context.Context, in *manager.NodeManager, opts ...grpc.CallOption) (*empty.Empty, error) { return &empty.Empty{}, nil } func (nm *NodeManagerMock) ConnectManager(ctx context.Context, in *manager.Id, opts ...grpc.CallOption) (*empty.Empty, error) { return &empty.Empty{}, nil } func (nm *NodeManagerMock) SearchNodeFields(ctx context.Context, in *manager.FieldQuery, opts ...grpc.CallOption) (*manager.Fields, error) { return &manager.Fields{}, nil } func (nm *NodeManagerMock) SearchNodes(ctx context.Context, in *manager.NodeQuery, opts ...grpc.CallOption) (*manager.Nodes, error) { return &manager.Nodes{}, nil } func (nm *NodeManagerMock) ProcessNode(ctx context.Context, in *manager.NodeMetadata, opts ...grpc.CallOption) (*manager.ProcessNodeResponse, error) { return &manager.ProcessNodeResponse{}, nil } func (nm *NodeManagerMock) ChangeNodeState(ctx context.Context, in *manager.NodeState, opts ...grpc.CallOption) (*manager.ChangeNodeStateResponse, error) { return &manager.ChangeNodeStateResponse{}, nil } func (nm *NodeManagerMock) GetNodeWithSecrets(ctx context.Context, in *manager.Id, opts ...grpc.CallOption) (*nodes.Node, error) { return &nodes.Node{}, nil } func (nm *NodeManagerMock) SearchManagerNodes(ctx context.Context, in *manager.NodeQuery, opts ...grpc.CallOption) (*manager.ManagerNodes, error) { return &manager.ManagerNodes{}, nil }
[ "\"RUN_MODE\"", "\"RUN_MODE\"", "\"RUN_MODE\"", "\"RUN_MODE\"", "\"RUN_MODE\"", "\"RUN_MODE\"", "\"RUN_MODE\"" ]
[]
[ "RUN_MODE" ]
[]
["RUN_MODE"]
go
1
0
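The compliance.go record above repeatedly gates client construction on RUN_MODE: when it equals "test", a mock client is returned instead of dialing the real gRPC service. The sketch below distills that pattern; Notifier, mockNotifier and grpcNotifier are illustrative stand-ins, not types from the source.

// Sketch of the RUN_MODE gating used in serveGrpc/setup above.
package main

import (
	"fmt"
	"os"
)

type Notifier interface{ Send(msg string) }

type mockNotifier struct{}

func (mockNotifier) Send(msg string) { fmt.Println("mock notify:", msg) }

type grpcNotifier struct{ endpoint string }

func (n grpcNotifier) Send(msg string) { fmt.Println("send to", n.endpoint+":", msg) }

func newNotifier(endpoint string) Notifier {
	if os.Getenv("RUN_MODE") == "test" {
		return mockNotifier{} // skip real dialing in tests
	}
	return grpcNotifier{endpoint: endpoint}
}

func main() {
	os.Setenv("RUN_MODE", "test")
	newNotifier("notifications-service:10125").Send("scan finished")
}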
flink-python/pyflink/fn_execution/coders.py
################################################################################ # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ################################################################################ import os from abc import ABC import pytz from apache_beam.typehints import typehints from pyflink.fn_execution import flink_fn_execution_pb2 try: from pyflink.fn_execution import coder_impl_fast as coder_impl except: from pyflink.fn_execution.beam import beam_coder_impl_slow as coder_impl __all__ = ['RowCoder', 'BigIntCoder', 'TinyIntCoder', 'BooleanCoder', 'SmallIntCoder', 'IntCoder', 'FloatCoder', 'DoubleCoder', 'BinaryCoder', 'CharCoder', 'DateCoder', 'TimeCoder', 'TimestampCoder', 'BasicArrayCoder', 'PrimitiveArrayCoder', 'MapCoder', 'DecimalCoder'] FLINK_SCALAR_FUNCTION_SCHEMA_CODER_URN = "flink:coder:schema:scalar_function:v1" FLINK_TABLE_FUNCTION_SCHEMA_CODER_URN = "flink:coder:schema:table_function:v1" FLINK_AGGREGATE_FUNCTION_SCHEMA_CODER_URN = "flink:coder:schema:aggregate_function:v1" FLINK_SCALAR_FUNCTION_SCHEMA_ARROW_CODER_URN = "flink:coder:schema:scalar_function:arrow:v1" FLINK_SCHEMA_ARROW_CODER_URN = "flink:coder:schema:arrow:v1" FLINK_MAP_FUNCTION_DATA_STREAM_CODER_URN = "flink:coder:map:v1" FLINK_FLAT_MAP_FUNCTION_DATA_STREAM_CODER_URN = "flink:coder:flat_map:v1" FLINK_OVER_WINDOW_ARROW_CODER_URN = "flink:coder:schema:batch_over_window:arrow:v1" class BaseCoder(ABC): def get_impl(self): pass @staticmethod def from_schema_proto(schema_proto): pass class TableFunctionRowCoder(BaseCoder): """ Coder for Table Function Row. """ def __init__(self, flatten_row_coder): self._flatten_row_coder = flatten_row_coder def get_impl(self): return coder_impl.TableFunctionRowCoderImpl(self._flatten_row_coder.get_impl()) @staticmethod def from_schema_proto(schema_proto): return TableFunctionRowCoder(FlattenRowCoder.from_schema_proto(schema_proto)) def __repr__(self): return 'TableFunctionRowCoder[%s]' % repr(self._flatten_row_coder) def __eq__(self, other): return (self.__class__ == other.__class__ and self._flatten_row_coder == other._flatten_row_coder) def __ne__(self, other): return not self == other def __hash__(self): return hash(self._flatten_row_coder) class AggregateFunctionRowCoder(BaseCoder): """ Coder for Aggregate Function Input Row. 
""" def __init__(self, flatten_row_coder): self._flatten_row_coder = flatten_row_coder def get_impl(self): return coder_impl.AggregateFunctionRowCoderImpl(self._flatten_row_coder.get_impl()) @staticmethod def from_schema_proto(schema_proto): return AggregateFunctionRowCoder(FlattenRowCoder.from_schema_proto(schema_proto)) def __repr__(self): return 'AggregateFunctionRowCoder[%s]' % repr(self._flatten_row_coder) def __eq__(self, other): return (self.__class__ == other.__class__ and self._flatten_row_coder == other._flatten_row_coder) def __ne__(self, other): return not self == other def __hash__(self): return hash(self._flatten_row_coder) class FlattenRowCoder(BaseCoder): """ Coder for Row. The decoded result will be flattened as a list of column values of a row instead of a row object. """ def __init__(self, field_coders): self._field_coders = field_coders def get_impl(self): return coder_impl.FlattenRowCoderImpl([c.get_impl() for c in self._field_coders]) @staticmethod def from_schema_proto(schema_proto): return FlattenRowCoder([from_proto(f.type) for f in schema_proto.fields]) def __repr__(self): return 'FlattenRowCoder[%s]' % ', '.join(str(c) for c in self._field_coders) def __eq__(self, other): return (self.__class__ == other.__class__ and len(self._field_coders) == len(other._field_coders) and [self._field_coders[i] == other._field_coders[i] for i in range(len(self._field_coders))]) def __ne__(self, other): return not self == other def __hash__(self): return hash(self._field_coders) class DataStreamMapCoder(BaseCoder): """ Coder for a DataStream Map Function input/output data. """ def __init__(self, field_coders): self._field_coders = field_coders def get_impl(self): return coder_impl.DataStreamMapCoderImpl(self._field_coders.get_impl()) @staticmethod def from_type_info_proto(type_info_proto): return DataStreamMapCoder(from_type_info_proto(type_info_proto.field[0].type)) def __repr__(self): return 'DataStreamStatelessMapCoder[%s]' % ', '.join(str(c) for c in self._field_coders) def __eq__(self, other): return (self.__class__ == other.__class__ and len(self._field_coders) == len(other._field_coders) and [self._field_coders[i] == other._field_coders[i] for i in range(len(self._field_coders))]) def __ne__(self, other): return not self == other def __hash__(self): return hash(self._field_coders) class DataStreamFlatMapCoder(BaseCoder): """ Coder for a DataStream FlatMap Function input/output data. """ def __init__(self, field_codes): self._field_coders = field_codes def get_impl(self): return coder_impl.DataStreamFlatMapCoderImpl( DataStreamMapCoder(self._field_coders).get_impl()) @staticmethod def from_type_info_proto(type_info_proto): return DataStreamFlatMapCoder(from_type_info_proto(type_info_proto.field[0].type)) def __repr__(self): return 'DataStreamStatelessFlatMapCoder[%s]' % ', '.join(str(c) for c in self._field_coders) def __eq__(self, other): return (self.__class__ == other.__class__ and len(self._field_coders) == len(other._field_coders) and [self._field_coders[i] == other._field_coders[i] for i in range(len(self._field_coders))]) def __ne__(self, other): return not self == other def __hash__(self): return hash(self._field_coders) class FieldCoder(ABC): def get_impl(self): pass class RowCoder(FieldCoder, BaseCoder): """ Coder for Row. 
""" def __init__(self, field_coders): self._field_coders = field_coders def get_impl(self): return coder_impl.RowCoderImpl([c.get_impl() for c in self._field_coders]) def __repr__(self): return 'RowCoder[%s]' % ', '.join(str(c) for c in self._field_coders) def __eq__(self, other): return (self.__class__ == other.__class__ and len(self._field_coders) == len(other._field_coders) and [self._field_coders[i] == other._field_coders[i] for i in range(len(self._field_coders))]) def __ne__(self, other): return not self == other def __hash__(self): return hash(self._field_coders) class CollectionCoder(FieldCoder): """ Base coder for collection. """ def __init__(self, elem_coder): self._elem_coder = elem_coder def is_deterministic(self): return self._elem_coder.is_deterministic() def __eq__(self, other): return (self.__class__ == other.__class__ and self._elem_coder == other._elem_coder) def __repr__(self): return '%s[%s]' % (self.__class__.__name__, repr(self._elem_coder)) def __ne__(self, other): return not self == other def __hash__(self): return hash(self._elem_coder) class BasicArrayCoder(CollectionCoder): """ Coder for Array. """ def __init__(self, elem_coder): super(BasicArrayCoder, self).__init__(elem_coder) def get_impl(self): return coder_impl.BasicArrayCoderImpl(self._elem_coder.get_impl()) class PrimitiveArrayCoder(CollectionCoder): """ Coder for Primitive Array. """ def __init__(self, elem_coder): super(PrimitiveArrayCoder, self).__init__(elem_coder) def get_impl(self): return coder_impl.PrimitiveArrayCoderImpl(self._elem_coder.get_impl()) class MapCoder(FieldCoder): """ Coder for Map. """ def __init__(self, key_coder, value_coder): self._key_coder = key_coder self._value_coder = value_coder def get_impl(self): return coder_impl.MapCoderImpl(self._key_coder.get_impl(), self._value_coder.get_impl()) def is_deterministic(self): return self._key_coder.is_deterministic() and self._value_coder.is_deterministic() def __repr__(self): return 'MapCoder[%s]' % ','.join([repr(self._key_coder), repr(self._value_coder)]) def __eq__(self, other): return (self.__class__ == other.__class__ and self._key_coder == other._key_coder and self._value_coder == other._value_coder) def __ne__(self, other): return not self == other def __hash__(self): return hash([self._key_coder, self._value_coder]) class BigIntCoder(FieldCoder): """ Coder for 8 bytes long. """ def get_impl(self): return coder_impl.BigIntCoderImpl() class TinyIntCoder(FieldCoder): """ Coder for Byte. """ def get_impl(self): return coder_impl.TinyIntCoderImpl() class BooleanCoder(FieldCoder): """ Coder for Boolean. """ def get_impl(self): return coder_impl.BooleanCoderImpl() class SmallIntCoder(FieldCoder): """ Coder for Short. """ def get_impl(self): return coder_impl.SmallIntCoderImpl() class IntCoder(FieldCoder): """ Coder for 4 bytes int. """ def get_impl(self): return coder_impl.IntCoderImpl() class FloatCoder(FieldCoder): """ Coder for Float. """ def get_impl(self): return coder_impl.FloatCoderImpl() class DoubleCoder(FieldCoder): """ Coder for Double. """ def get_impl(self): return coder_impl.DoubleCoderImpl() class DecimalCoder(FieldCoder): """ Coder for Decimal. """ def __init__(self, precision, scale): self.precision = precision self.scale = scale def get_impl(self): return coder_impl.DecimalCoderImpl(self.precision, self.scale) class BigDecimalCoder(FieldCoder): """ Coder for Basic Decimal that no need to have precision and scale specified. 
""" def get_impl(self): return coder_impl.BigDecimalCoderImpl() class BinaryCoder(FieldCoder): """ Coder for Byte Array. """ def get_impl(self): return coder_impl.BinaryCoderImpl() class CharCoder(FieldCoder): """ Coder for Character String. """ def get_impl(self): return coder_impl.CharCoderImpl() class DateCoder(FieldCoder): """ Coder for Date """ def get_impl(self): return coder_impl.DateCoderImpl() class TimeCoder(FieldCoder): """ Coder for Time. """ def get_impl(self): return coder_impl.TimeCoderImpl() class TimestampCoder(FieldCoder): """ Coder for Timestamp. """ def __init__(self, precision): self.precision = precision def get_impl(self): return coder_impl.TimestampCoderImpl(self.precision) class LocalZonedTimestampCoder(FieldCoder): """ Coder for LocalZonedTimestamp. """ def __init__(self, precision, timezone): self.precision = precision self.timezone = timezone def get_impl(self): return coder_impl.LocalZonedTimestampCoderImpl(self.precision, self.timezone) class PickledBytesCoder(FieldCoder): def get_impl(self): return coder_impl.PickledBytesCoderImpl() class TupleCoder(FieldCoder): def __init__(self, field_coders): self._field_coders = field_coders def get_impl(self): return coder_impl.TupleCoderImpl([c.get_impl() for c in self._field_coders]) def to_type_hint(self): return typehints.Tuple def __repr__(self): return 'TupleCoder[%s]' % ', '.join(str(c) for c in self._field_coders) type_name = flink_fn_execution_pb2.Schema _type_name_mappings = { type_name.TINYINT: TinyIntCoder(), type_name.SMALLINT: SmallIntCoder(), type_name.INT: IntCoder(), type_name.BIGINT: BigIntCoder(), type_name.BOOLEAN: BooleanCoder(), type_name.FLOAT: FloatCoder(), type_name.DOUBLE: DoubleCoder(), type_name.BINARY: BinaryCoder(), type_name.VARBINARY: BinaryCoder(), type_name.CHAR: CharCoder(), type_name.VARCHAR: CharCoder(), type_name.DATE: DateCoder(), type_name.TIME: TimeCoder(), } def from_proto(field_type): """ Creates the corresponding :class:`Coder` given the protocol representation of the field type. :param field_type: the protocol representation of the field type :return: :class:`Coder` """ field_type_name = field_type.type_name coder = _type_name_mappings.get(field_type_name) if coder is not None: return coder if field_type_name == type_name.ROW: return RowCoder([from_proto(f.type) for f in field_type.row_schema.fields]) if field_type_name == type_name.TIMESTAMP: return TimestampCoder(field_type.timestamp_info.precision) if field_type_name == type_name.LOCAL_ZONED_TIMESTAMP: timezone = pytz.timezone(os.environ['table.exec.timezone']) return LocalZonedTimestampCoder(field_type.local_zoned_timestamp_info.precision, timezone) elif field_type_name == type_name.BASIC_ARRAY: return BasicArrayCoder(from_proto(field_type.collection_element_type)) elif field_type_name == type_name.MAP: return MapCoder(from_proto(field_type.map_info.key_type), from_proto(field_type.map_info.value_type)) elif field_type_name == type_name.DECIMAL: return DecimalCoder(field_type.decimal_info.precision, field_type.decimal_info.scale) else: raise ValueError("field_type %s is not supported." % field_type) # for data stream type information. 
type_info_name = flink_fn_execution_pb2.TypeInfo _type_info_name_mappings = { type_info_name.STRING: CharCoder(), type_info_name.BYTE: TinyIntCoder(), type_info_name.BOOLEAN: BooleanCoder(), type_info_name.SHORT: SmallIntCoder(), type_info_name.INT: IntCoder(), type_info_name.LONG: BigIntCoder(), type_info_name.FLOAT: FloatCoder(), type_info_name.DOUBLE: DoubleCoder(), type_info_name.CHAR: CharCoder(), type_info_name.BIG_INT: BigIntCoder(), type_info_name.BIG_DEC: BigDecimalCoder(), type_info_name.SQL_DATE: DateCoder(), type_info_name.SQL_TIME: TimeCoder(), type_info_name.SQL_TIMESTAMP: TimeCoder(), type_info_name.PICKLED_BYTES: PickledBytesCoder() } def from_type_info_proto(field_type): field_type_name = field_type.type_name try: return _type_info_name_mappings[field_type_name] except KeyError: if field_type_name == type_info_name.ROW: return RowCoder([from_type_info_proto(f.type) for f in field_type.row_type_info.field]) if field_type_name == type_info_name.PRIMITIVE_ARRAY: return PrimitiveArrayCoder(from_type_info_proto(field_type.collection_element_type)) if field_type_name == type_info_name.BASIC_ARRAY: return BasicArrayCoder(from_type_info_proto(field_type.collection_element_type)) if field_type_name == type_info_name.TUPLE: return TupleCoder([from_type_info_proto(f.type) for f in field_type.tuple_type_info.field]) raise ValueError("field_type %s is not supported." % field_type)
[]
[]
[ "table.exec.timezone" ]
[]
["table.exec.timezone"]
python
1
0
st2client/st2client/models/core.py
# Copyright 2020 The StackStorm Authors. # Copyright 2019 Extreme Networks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import import os import json import logging from functools import wraps import six from six.moves import urllib from six.moves import http_client import requests from st2client.utils import httpclient LOG = logging.getLogger(__name__) def add_auth_token_to_kwargs_from_env(func): @wraps(func) def decorate(*args, **kwargs): if not kwargs.get('token') and os.environ.get('ST2_AUTH_TOKEN', None): kwargs['token'] = os.environ.get('ST2_AUTH_TOKEN') if not kwargs.get('api_key') and os.environ.get('ST2_API_KEY', None): kwargs['api_key'] = os.environ.get('ST2_API_KEY') return func(*args, **kwargs) return decorate class Resource(object): # An alias to use for the resource if different than the class name. _alias = None # Display name of the resource. This may be different than its resource # name specifically when the resource name is composed of multiple words. _display_name = None # URL path for the resource. _url_path = None # Plural form of the resource name. This will be used to build the # latter part of the REST URL. _plural = None # Plural form of the resource display name. _plural_display_name = None # A list of class attributes which will be included in __repr__ return value _repr_attributes = [] def __init__(self, *args, **kwargs): for k, v in six.iteritems(kwargs): setattr(self, k, v) def to_dict(self, exclude_attributes=None): """ Return a dictionary representation of this object. :param exclude_attributes: Optional list of attributes to exclude. :type exclude_attributes: ``list`` :rtype: ``dict`` """ exclude_attributes = exclude_attributes or [] attributes = list(self.__dict__.keys()) attributes = [attr for attr in attributes if not attr.startswith('__') and attr not in exclude_attributes] result = {} for attribute in attributes: value = getattr(self, attribute, None) result[attribute] = value return result @classmethod def get_alias(cls): return cls._alias if cls._alias else cls.__name__ @classmethod def get_display_name(cls): return cls._display_name if cls._display_name else cls.__name__ @classmethod def get_plural_name(cls): if not cls._plural: raise Exception('The %s class is missing class attributes ' 'in its definition.' 
% cls.__name__) return cls._plural @classmethod def get_plural_display_name(cls): return (cls._plural_display_name if cls._plural_display_name else cls._plural) @classmethod def get_url_path_name(cls): if cls._url_path: return cls._url_path return cls.get_plural_name().lower() def serialize(self): return dict((k, v) for k, v in six.iteritems(self.__dict__) if not k.startswith('_')) @classmethod def deserialize(cls, doc): if type(doc) is not dict: doc = json.loads(doc) return cls(**doc) def __str__(self): return str(self.__repr__()) def __repr__(self): if not self._repr_attributes: return super(Resource, self).__repr__() attributes = [] for attribute in self._repr_attributes: value = getattr(self, attribute, None) attributes.append('%s=%s' % (attribute, value)) attributes = ','.join(attributes) class_name = self.__class__.__name__ result = '<%s %s>' % (class_name, attributes) return result class ResourceManager(object): def __init__(self, resource, endpoint, cacert=None, debug=False): self.resource = resource self.debug = debug self.client = httpclient.HTTPClient(endpoint, cacert=cacert, debug=debug) @staticmethod def handle_error(response): try: content = response.json() fault = content.get('faultstring', '') if content else '' if fault: response.reason += '\nMESSAGE: %s' % fault except Exception as e: response.reason += ('\nUnable to retrieve detailed message ' 'from the HTTP response. %s\n' % six.text_type(e)) response.raise_for_status() @add_auth_token_to_kwargs_from_env def get_all(self, **kwargs): # TODO: This is ugly, stop abusing kwargs url = '/%s' % self.resource.get_url_path_name() limit = kwargs.pop('limit', None) pack = kwargs.pop('pack', None) prefix = kwargs.pop('prefix', None) user = kwargs.pop('user', None) params = kwargs.pop('params', {}) if limit: params['limit'] = limit if pack: params['pack'] = pack if prefix: params['prefix'] = prefix if user: params['user'] = user response = self.client.get(url=url, params=params, **kwargs) if response.status_code != http_client.OK: self.handle_error(response) return [self.resource.deserialize(item) for item in response.json()] @add_auth_token_to_kwargs_from_env def get_by_id(self, id, **kwargs): url = '/%s/%s' % (self.resource.get_url_path_name(), id) response = self.client.get(url, **kwargs) if response.status_code == http_client.NOT_FOUND: return None if response.status_code != http_client.OK: self.handle_error(response) return self.resource.deserialize(response.json()) @add_auth_token_to_kwargs_from_env def get_property(self, id_, property_name, self_deserialize=True, **kwargs): """ Gets a property of a Resource. id_ : Id of the resource property_name: Name of the property self_deserialize: #Implies use the deserialize method implemented by this resource. 
""" token = kwargs.pop('token', None) api_key = kwargs.pop('api_key', None) if kwargs: url = '/%s/%s/%s/?%s' % (self.resource.get_url_path_name(), id_, property_name, urllib.parse.urlencode(kwargs)) else: url = '/%s/%s/%s/' % (self.resource.get_url_path_name(), id_, property_name) if token: response = self.client.get(url, token=token) elif api_key: response = self.client.get(url, api_key=api_key) else: response = self.client.get(url) if response.status_code == http_client.NOT_FOUND: return None if response.status_code != http_client.OK: self.handle_error(response) if self_deserialize: return [self.resource.deserialize(item) for item in response.json()] else: return response.json() @add_auth_token_to_kwargs_from_env def get_by_ref_or_id(self, ref_or_id, **kwargs): return self.get_by_id(id=ref_or_id, **kwargs) def _query_details(self, **kwargs): if not kwargs: raise Exception('Query parameter is not provided.') token = kwargs.get('token', None) api_key = kwargs.get('api_key', None) params = kwargs.get('params', {}) for k, v in six.iteritems(kwargs): # Note: That's a special case to support api_key and token kwargs if k not in ['token', 'api_key', 'params']: params[k] = v url = '/%s/?%s' % (self.resource.get_url_path_name(), urllib.parse.urlencode(params)) if token: response = self.client.get(url, token=token) elif api_key: response = self.client.get(url, api_key=api_key) else: response = self.client.get(url) if response.status_code == http_client.NOT_FOUND: # for query and query_with_count return [], None if response.status_code != http_client.OK: self.handle_error(response) items = response.json() instances = [self.resource.deserialize(item) for item in items] return instances, response @add_auth_token_to_kwargs_from_env def query(self, **kwargs): instances, _ = self._query_details(**kwargs) return instances @add_auth_token_to_kwargs_from_env def query_with_count(self, **kwargs): instances, response = self._query_details(**kwargs) if response and 'X-Total-Count' in response.headers: return (instances, int(response.headers['X-Total-Count'])) else: return (instances, None) @add_auth_token_to_kwargs_from_env def get_by_name(self, name, **kwargs): instances = self.query(name=name, **kwargs) if not instances: return None else: if len(instances) > 1: raise Exception('More than one %s named "%s" are found.' 
% (self.resource.__name__.lower(), name)) return instances[0] @add_auth_token_to_kwargs_from_env def create(self, instance, **kwargs): url = '/%s' % self.resource.get_url_path_name() response = self.client.post(url, instance.serialize(), **kwargs) if response.status_code != http_client.OK: self.handle_error(response) instance = self.resource.deserialize(response.json()) return instance @add_auth_token_to_kwargs_from_env def update(self, instance, **kwargs): url = '/%s/%s' % (self.resource.get_url_path_name(), instance.id) response = self.client.put(url, instance.serialize(), **kwargs) if response.status_code != http_client.OK: self.handle_error(response) instance = self.resource.deserialize(response.json()) return instance @add_auth_token_to_kwargs_from_env def delete(self, instance, **kwargs): url = '/%s/%s' % (self.resource.get_url_path_name(), instance.id) response = self.client.delete(url, **kwargs) if response.status_code not in [http_client.OK, http_client.NO_CONTENT, http_client.NOT_FOUND]: self.handle_error(response) return False return True @add_auth_token_to_kwargs_from_env def delete_by_id(self, instance_id, **kwargs): url = '/%s/%s' % (self.resource.get_url_path_name(), instance_id) response = self.client.delete(url, **kwargs) if response.status_code not in [http_client.OK, http_client.NO_CONTENT, http_client.NOT_FOUND]: self.handle_error(response) return False try: resp_json = response.json() if resp_json: return resp_json except: pass return True class ActionAliasResourceManager(ResourceManager): def __init__(self, resource, endpoint, cacert=None, debug=False): self.resource = resource self.debug = debug self.client = httpclient.HTTPClient(root=endpoint, cacert=cacert, debug=debug) @add_auth_token_to_kwargs_from_env def match(self, instance, **kwargs): url = '/%s/match' % self.resource.get_url_path_name() response = self.client.post(url, instance.serialize(), **kwargs) if response.status_code != http_client.OK: self.handle_error(response) match = response.json() return (self.resource.deserialize(match['actionalias']), match['representation']) class ActionAliasExecutionManager(ResourceManager): @add_auth_token_to_kwargs_from_env def match_and_execute(self, instance, **kwargs): url = '/%s/match_and_execute' % self.resource.get_url_path_name() response = self.client.post(url, instance.serialize(), **kwargs) if response.status_code != http_client.OK: self.handle_error(response) instance = self.resource.deserialize(response.json()) return instance class ActionResourceManager(ResourceManager): @add_auth_token_to_kwargs_from_env def get_entrypoint(self, ref_or_id, **kwargs): url = '/%s/views/entry_point/%s' % (self.resource.get_url_path_name(), ref_or_id) response = self.client.get(url, **kwargs) if response.status_code != http_client.OK: self.handle_error(response) return response.text class ExecutionResourceManager(ResourceManager): @add_auth_token_to_kwargs_from_env def re_run(self, execution_id, parameters=None, tasks=None, no_reset=None, delay=0, **kwargs): url = '/%s/%s/re_run' % (self.resource.get_url_path_name(), execution_id) tasks = tasks or [] no_reset = no_reset or [] if list(set(no_reset) - set(tasks)): raise ValueError('List of tasks to reset does not match the tasks to rerun.') data = { 'parameters': parameters or {}, 'tasks': tasks, 'reset': list(set(tasks) - set(no_reset)), 'delay': delay } response = self.client.post(url, data, **kwargs) if response.status_code != http_client.OK: self.handle_error(response) instance = self.resource.deserialize(response.json()) 
return instance @add_auth_token_to_kwargs_from_env def get_output(self, execution_id, output_type=None, **kwargs): url = '/%s/%s/output' % (self.resource.get_url_path_name(), execution_id) if output_type: url += '?' + urllib.parse.urlencode({'output_type': output_type}) response = self.client.get(url, **kwargs) if response.status_code != http_client.OK: self.handle_error(response) return response.text @add_auth_token_to_kwargs_from_env def pause(self, execution_id, **kwargs): url = '/%s/%s' % (self.resource.get_url_path_name(), execution_id) data = {'status': 'pausing'} response = self.client.put(url, data, **kwargs) if response.status_code != http_client.OK: self.handle_error(response) return self.resource.deserialize(response.json()) @add_auth_token_to_kwargs_from_env def resume(self, execution_id, **kwargs): url = '/%s/%s' % (self.resource.get_url_path_name(), execution_id) data = {'status': 'resuming'} response = self.client.put(url, data, **kwargs) if response.status_code != http_client.OK: self.handle_error(response) return self.resource.deserialize(response.json()) @add_auth_token_to_kwargs_from_env def get_children(self, execution_id, **kwargs): url = '/%s/%s/children' % (self.resource.get_url_path_name(), execution_id) depth = kwargs.pop('depth', -1) params = kwargs.pop('params', {}) if depth: params['depth'] = depth response = self.client.get(url=url, params=params, **kwargs) if response.status_code != http_client.OK: self.handle_error(response) return [self.resource.deserialize(item) for item in response.json()] class InquiryResourceManager(ResourceManager): @add_auth_token_to_kwargs_from_env def respond(self, inquiry_id, inquiry_response, **kwargs): """ Update st2.inquiry.respond action Update st2client respond command to use this? """ url = '/%s/%s' % (self.resource.get_url_path_name(), inquiry_id) payload = { "id": inquiry_id, "response": inquiry_response } response = self.client.put(url, payload, **kwargs) if response.status_code != http_client.OK: self.handle_error(response) return self.resource.deserialize(response.json()) class TriggerInstanceResourceManager(ResourceManager): @add_auth_token_to_kwargs_from_env def re_emit(self, trigger_instance_id, **kwargs): url = '/%s/%s/re_emit' % (self.resource.get_url_path_name(), trigger_instance_id) response = self.client.post(url, None, **kwargs) if response.status_code != http_client.OK: self.handle_error(response) return response.json() class AsyncRequest(Resource): pass class PackResourceManager(ResourceManager): @add_auth_token_to_kwargs_from_env def install(self, packs, force=False, python3=False, skip_dependencies=False, **kwargs): url = '/%s/install' % (self.resource.get_url_path_name()) payload = { 'packs': packs, 'force': force, 'python3': python3, 'skip_dependencies': skip_dependencies } response = self.client.post(url, payload, **kwargs) if response.status_code != http_client.OK: self.handle_error(response) instance = AsyncRequest.deserialize(response.json()) return instance @add_auth_token_to_kwargs_from_env def remove(self, packs, **kwargs): url = '/%s/uninstall' % (self.resource.get_url_path_name()) response = self.client.post(url, {'packs': packs}, **kwargs) if response.status_code != http_client.OK: self.handle_error(response) instance = AsyncRequest.deserialize(response.json()) return instance @add_auth_token_to_kwargs_from_env def search(self, args, ignore_errors=False, **kwargs): url = '/%s/index/search' % (self.resource.get_url_path_name()) if 'query' in vars(args): payload = {'query': args.query} else: payload 
= {'pack': args.pack} response = self.client.post(url, payload, **kwargs) if response.status_code != http_client.OK: if ignore_errors: return None self.handle_error(response) data = response.json() if isinstance(data, list): return [self.resource.deserialize(item) for item in data] else: return self.resource.deserialize(data) if data else None @add_auth_token_to_kwargs_from_env def register(self, packs=None, types=None, **kwargs): url = '/%s/register' % (self.resource.get_url_path_name()) payload = {} if types: payload['types'] = types if packs: payload['packs'] = packs response = self.client.post(url, payload, **kwargs) if response.status_code != http_client.OK: self.handle_error(response) instance = self.resource.deserialize(response.json()) return instance class ConfigManager(ResourceManager): @add_auth_token_to_kwargs_from_env def update(self, instance, **kwargs): url = '/%s/%s' % (self.resource.get_url_path_name(), instance.pack) response = self.client.put(url, instance.values, **kwargs) if response.status_code != http_client.OK: self.handle_error(response) instance = self.resource.deserialize(response.json()) return instance class WebhookManager(ResourceManager): def __init__(self, resource, endpoint, cacert=None, debug=False): self.resource = resource self.debug = debug self.client = httpclient.HTTPClient(root=endpoint, cacert=cacert, debug=debug) @add_auth_token_to_kwargs_from_env def post_generic_webhook(self, trigger, payload=None, trace_tag=None, **kwargs): url = '/webhooks/st2' headers = {} data = { 'trigger': trigger, 'payload': payload or {} } if trace_tag: headers['St2-Trace-Tag'] = trace_tag response = self.client.post(url, data=data, headers=headers, **kwargs) if response.status_code != http_client.OK: self.handle_error(response) return response.json() @add_auth_token_to_kwargs_from_env def match(self, instance, **kwargs): url = '/%s/match' % self.resource.get_url_path_name() response = self.client.post(url, instance.serialize(), **kwargs) if response.status_code != http_client.OK: self.handle_error(response) match = response.json() return (self.resource.deserialize(match['actionalias']), match['representation']) class StreamManager(object): def __init__(self, endpoint, cacert=None, debug=False): self._url = httpclient.get_url_without_trailing_slash(endpoint) + '/stream' self.debug = debug self.cacert = cacert @add_auth_token_to_kwargs_from_env def listen(self, events=None, **kwargs): # Late import to avoid very expensive in-direct import (~1 second) when this function is # not called / used from sseclient import SSEClient url = self._url query_params = {} request_params = {} if events and isinstance(events, six.string_types): events = [events] if 'token' in kwargs: query_params['x-auth-token'] = kwargs.get('token') if 'api_key' in kwargs: query_params['st2-api-key'] = kwargs.get('api_key') if events: query_params['events'] = ','.join(events) if self.cacert is not None: request_params['verify'] = self.cacert query_string = '?' + urllib.parse.urlencode(query_params) url = url + query_string response = requests.get(url, stream=True, **request_params) client = SSEClient(response) for message in client.events(): # If the execution on the API server takes too long, the message # can be empty. In this case, rerun the query. 
if not message.data: continue yield json.loads(message.data) class WorkflowManager(object): def __init__(self, endpoint, cacert, debug): self.debug = debug self.cacert = cacert self.endpoint = endpoint + '/workflows' self.client = httpclient.HTTPClient(root=self.endpoint, cacert=cacert, debug=debug) @staticmethod def handle_error(response): try: content = response.json() fault = content.get('faultstring', '') if content else '' if fault: response.reason += '\nMESSAGE: %s' % fault except Exception as e: response.reason += ( '\nUnable to retrieve detailed message ' 'from the HTTP response. %s\n' % six.text_type(e) ) response.raise_for_status() @add_auth_token_to_kwargs_from_env def inspect(self, definition, **kwargs): url = '/inspect' if not isinstance(definition, six.string_types): raise TypeError('Workflow definition is not type of string.') if 'headers' not in kwargs: kwargs['headers'] = {} kwargs['headers']['content-type'] = 'text/plain' response = self.client.post_raw(url, definition, **kwargs) if response.status_code != http_client.OK: self.handle_error(response) return response.json() class ServiceRegistryGroupsManager(ResourceManager): @add_auth_token_to_kwargs_from_env def list(self, **kwargs): url = '/service_registry/groups' headers = {} response = self.client.get(url, headers=headers, **kwargs) if response.status_code != http_client.OK: self.handle_error(response) groups = response.json()['groups'] result = [] for group in groups: item = self.resource.deserialize({'group_id': group}) result.append(item) return result class ServiceRegistryMembersManager(ResourceManager): @add_auth_token_to_kwargs_from_env def list(self, group_id, **kwargs): url = '/service_registry/groups/%s/members' % (group_id) headers = {} response = self.client.get(url, headers=headers, **kwargs) if response.status_code != http_client.OK: self.handle_error(response) members = response.json()['members'] result = [] for member in members: data = { 'group_id': group_id, 'member_id': member['member_id'], 'capabilities': member['capabilities'] } item = self.resource.deserialize(data) result.append(item) return result
[]
[]
[ "ST2_API_KEY", "ST2_AUTH_TOKEN" ]
[]
["ST2_API_KEY", "ST2_AUTH_TOKEN"]
python
2
0
code/lambda/function/Auth/Register/app.py
import json import os import logging from aws import helper from aws import federate from aws.helper import DeveloperMode logger = logging.getLogger() logger.setLevel(logging.INFO) USER_POOL_ID = os.environ["USER_POOL_ID"] USER_TABLE_NAME = os.environ["USER_TABLE_NAME"] @DeveloperMode(True) def lambda_handler(event, context): """ Lambda function to register a new user. description: This function is used to register a new user. The user is registered in the user pool and the user is added to the user table. if platform and platform token are provided, the user is federated to the platform. payload: email: email of the user password: password of the user client_id: client id of the client redirect_uri: client id of the redirect_uri optional: platform: platform to federate the user to dynamodb platform_id_token: token to federate the user to dynamodb platform_access_token: access token to federate the user to dynamodb """ input_json = dict() input_json = json.loads(event["body"]) # Input data validation ----- if not "email" in input_json: return helper.build_response( {"message": "E-mail address is required."}, 403) if not "password" in input_json: return helper.build_response({"message": "Password is required."}, 403) elif len(input_json["password"]) < 6: return helper.build_response( {"message": "Password must be at least 6 characters long."}, 403) if not "client_id" in input_json: return helper.build_response({"message": "`client_id` is required"}, 403) # data validated, assign to variables email = input_json["email"].lower() # store all emails as lower case password = input_json["password"] # verify the client_id and redirect_uri if not "client_id" in input_json or not "redirect_uri" in input_json: return helper.build_response( {"message": "You do not have permission to access this resource."}, 403) client_id = input_json["client_id"] redirect_uri = input_json["redirect_uri"] _, msg = helper.verify_client_id_and_redirect_uri( user_pool_id=USER_POOL_ID, client_id=client_id, redirect_uri=redirect_uri) if msg != None: logging.info(msg) return helper.build_response({"message": msg}, 403) # build client metadata for confirmation email ----- client_metadata = dict() if "agent" in input_json: client_metadata["agent"] = input_json["agent"] if "client_id" in input_json: client_metadata["client_id"] = input_json["client_id"] if "redirect_uri" in input_json: client_metadata["redirect_uri"] = input_json["redirect_uri"] # perform cognito register resp, msg = helper.register(user_pool_id=USER_POOL_ID, username=email, email=email, password=password, client_id=client_id, client_metadata=client_metadata) if msg != None: logging.info(msg) return helper.build_response({"message": msg}, 403) # get user info user_cognito_id = resp["UserSub"] # register the federate record in the user table if "platform_id_token" in input_json or "platform_access_token" in input_json: platform_login_data = dict() platform_login_data["platform"] = input_json["platform"] if "platform_code" in input_json: platform_login_data["code"] = input_json["platform_code"] if "platform_id_token" in input_json: platform_login_data["id_token"] = input_json["platform_id_token"] if "platform_access_token" in input_json: platform_login_data["access_token"] = input_json[ "platform_access_token"] feder_resp, msg = federate.verify_federate_and_register_or_get_user( user_table_name=USER_TABLE_NAME, platform_login_data=platform_login_data, user_cognito_id=user_cognito_id, cognito_email=email, mode="register") if msg != None: logging.info(msg) return 
helper.build_response({"message": msg}, 403) return helper.build_response({"message": msg}, 200)
[]
[]
[ "USER_TABLE_NAME", "USER_POOL_ID" ]
[]
["USER_TABLE_NAME", "USER_POOL_ID"]
python
2
0
toml/toml.go
// Package toml adds support to marshal and unmarshal types not in the official TOML spec. package toml // import "github.com/freetsdb/freetsdb/toml" import ( "encoding" "errors" "fmt" "math" "os" "os/user" "reflect" "strconv" "strings" "time" "unicode" ) // Duration is a TOML wrapper type for time.Duration. type Duration time.Duration // String returns the string representation of the duration. func (d Duration) String() string { return time.Duration(d).String() } // UnmarshalText parses a TOML value into a duration value. func (d *Duration) UnmarshalText(text []byte) error { // Ignore if there is no value set. if len(text) == 0 { return nil } // Otherwise parse as a duration formatted string. duration, err := time.ParseDuration(string(text)) if err != nil { return err } // Set duration and return. *d = Duration(duration) return nil } // MarshalText converts a duration to a string for decoding toml func (d Duration) MarshalText() (text []byte, err error) { return []byte(d.String()), nil } // Size represents a TOML parseable file size. // Users can specify size using "k" or "K" for kibibytes, "m" or "M" for mebibytes, // and "g" or "G" for gibibytes. If a size suffix isn't specified then bytes are assumed. type Size uint64 // UnmarshalText parses a byte size from text. func (s *Size) UnmarshalText(text []byte) error { if len(text) == 0 { return fmt.Errorf("size was empty") } // The multiplier defaults to 1 in case the size has // no suffix (and is then just raw bytes) mult := uint64(1) // Preserve the original text for error messages sizeText := text // Parse unit of measure suffix := text[len(sizeText)-1] if !unicode.IsDigit(rune(suffix)) { switch suffix { case 'k', 'K': mult = 1 << 10 // KiB case 'm', 'M': mult = 1 << 20 // MiB case 'g', 'G': mult = 1 << 30 // GiB default: return fmt.Errorf("unknown size suffix: %c (expected k, m, or g)", suffix) } sizeText = sizeText[:len(sizeText)-1] } // Parse numeric portion of value. size, err := strconv.ParseUint(string(sizeText), 10, 64) if err != nil { return fmt.Errorf("invalid size: %s", string(text)) } if math.MaxUint64/mult < size { return fmt.Errorf("size would overflow the max size (%d) of a uint: %s", uint64(math.MaxUint64), string(text)) } size *= mult *s = Size(size) return nil } type FileMode uint32 func (m *FileMode) UnmarshalText(text []byte) error { // Ignore if there is no value set. 
if len(text) == 0 { return nil } mode, err := strconv.ParseUint(string(text), 8, 32) if err != nil { return err } else if mode == 0 { return errors.New("file mode cannot be zero") } *m = FileMode(mode) return nil } func (m FileMode) MarshalText() (text []byte, err error) { if m != 0 { return []byte(fmt.Sprintf("%04o", m)), nil } return nil, nil } type Group int func (g *Group) UnmarshalTOML(data interface{}) error { if grpName, ok := data.(string); ok { group, err := user.LookupGroup(grpName) if err != nil { return err } gid, err := strconv.Atoi(group.Gid) if err != nil { return err } *g = Group(gid) return nil } else if gid, ok := data.(int64); ok { *g = Group(gid) return nil } return errors.New("group must be a name (string) or id (int)") } func ApplyEnvOverrides(getenv func(string) string, prefix string, val interface{}) error { if getenv == nil { getenv = os.Getenv } return applyEnvOverrides(getenv, prefix, reflect.ValueOf(val), "") } func applyEnvOverrides(getenv func(string) string, prefix string, spec reflect.Value, structKey string) error { element := spec // If spec is a named type and is addressable, // check the address to see if it implements encoding.TextUnmarshaler. if spec.Kind() != reflect.Ptr && spec.Type().Name() != "" && spec.CanAddr() { v := spec.Addr() if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { value := getenv(prefix) // Skip any fields we don't have a value to set if len(value) == 0 { return nil } return u.UnmarshalText([]byte(value)) } } // If we have a pointer, dereference it if spec.Kind() == reflect.Ptr { element = spec.Elem() } value := getenv(prefix) switch element.Kind() { case reflect.String: if len(value) == 0 { return nil } element.SetString(value) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: intValue, err := strconv.ParseInt(value, 0, element.Type().Bits()) if err != nil { return fmt.Errorf("failed to apply %v to %v using type %v and value '%v': %s", prefix, structKey, element.Type().String(), value, err) } element.SetInt(intValue) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: intValue, err := strconv.ParseUint(value, 0, element.Type().Bits()) if err != nil { return fmt.Errorf("failed to apply %v to %v using type %v and value '%v': %s", prefix, structKey, element.Type().String(), value, err) } element.SetUint(intValue) case reflect.Bool: boolValue, err := strconv.ParseBool(value) if err != nil { return fmt.Errorf("failed to apply %v to %v using type %v and value '%v': %s", prefix, structKey, element.Type().String(), value, err) } element.SetBool(boolValue) case reflect.Float32, reflect.Float64: floatValue, err := strconv.ParseFloat(value, element.Type().Bits()) if err != nil { return fmt.Errorf("failed to apply %v to %v using type %v and value '%v': %s", prefix, structKey, element.Type().String(), value, err) } element.SetFloat(floatValue) case reflect.Slice: // If the type is s slice, apply to each using the index as a suffix, e.g. GRAPHITE_0, GRAPHITE_0_TEMPLATES_0 or GRAPHITE_0_TEMPLATES="item1,item2" for j := 0; j < element.Len(); j++ { f := element.Index(j) if err := applyEnvOverrides(getenv, prefix, f, structKey); err != nil { return err } if err := applyEnvOverrides(getenv, fmt.Sprintf("%s_%d", prefix, j), f, structKey); err != nil { return err } } // If the type is s slice but have value not parsed as slice e.g. 
GRAPHITE_0_TEMPLATES="item1,item2" if element.Len() == 0 && len(value) > 0 { rules := strings.Split(value, ",") for _, rule := range rules { element.Set(reflect.Append(element, reflect.ValueOf(rule))) } } case reflect.Struct: typeOfSpec := element.Type() for i := 0; i < element.NumField(); i++ { field := element.Field(i) // Skip any fields that we cannot set if !field.CanSet() && field.Kind() != reflect.Slice { continue } structField := typeOfSpec.Field(i) fieldName := structField.Name configName := structField.Tag.Get("toml") if configName == "-" { // Skip fields with tag `toml:"-"`. continue } if configName == "" && structField.Anonymous { // Embedded field without a toml tag. // Don't modify prefix. if err := applyEnvOverrides(getenv, prefix, field, fieldName); err != nil { return err } continue } // Replace hyphens with underscores to avoid issues with shells configName = strings.Replace(configName, "-", "_", -1) envKey := strings.ToUpper(configName) if prefix != "" { envKey = strings.ToUpper(fmt.Sprintf("%s_%s", prefix, configName)) } // If it's a sub-config, recursively apply if field.Kind() == reflect.Struct || field.Kind() == reflect.Ptr || field.Kind() == reflect.Slice || field.Kind() == reflect.Array { if err := applyEnvOverrides(getenv, envKey, field, fieldName); err != nil { return err } continue } value := getenv(envKey) // Skip any fields we don't have a value to set if len(value) == 0 { continue } if err := applyEnvOverrides(getenv, envKey, field, fieldName); err != nil { return err } } } return nil }
[]
[]
[]
[]
[]
go
0
0
openquake/calculators/tests/event_based_risk_test.py
# -*- coding: utf-8 -*- # vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright (C) 2015-2018 GEM Foundation # # OpenQuake is free software: you can redistribute it and/or modify it # under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # OpenQuake is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with OpenQuake. If not, see <http://www.gnu.org/licenses/>. import os import sys import mock import unittest import numpy import h5py from nose.plugins.attrib import attr from openquake.baselib.general import gettemp from openquake.calculators.views import view from openquake.calculators.tests import CalculatorTestCase, strip_calc_id from openquake.calculators.export import export from openquake.calculators.extract import extract from openquake.qa_tests_data.event_based_risk import ( case_1, case_2, case_3, case_4, case_4a, case_6c, case_master, case_miriam, occupants, case_1g, case_7a) # used for a sanity check def check_total_losses(calc): dstore = calc.datastore loss_dt = calc.oqparam.loss_dt() LI = len(loss_dt.names) data1 = numpy.zeros(LI, numpy.float32) alt = dstore['losses_by_event'].value for li, lt in enumerate(loss_dt.names): data1[li] += alt['loss'][:, li].sum() # test the asset_loss_table exporter; notice that I need to disable # the parallelism to avoid reading bogus data: this is the usual # heisenbug when reading in parallel an .hdf5 generated in process with mock.patch.dict(os.environ, {'OQ_DISTRIBUTE': 'no'}): [fname] = export(('asset_loss_table', 'hdf5'), dstore) print('Generating %s' % fname) with h5py.File(fname) as f: total = f['asset_loss_table'].attrs['total'] # check the sums are consistent with the ones coming from asset_loss_table numpy.testing.assert_allclose(data1, total, 1E-6) class EventBasedRiskTestCase(CalculatorTestCase): def check_attr(self, name, value): got = self.calc.datastore.get_attr('agg_curves-stats', name) numpy.testing.assert_equal(value, got) def assert_stats_ok(self, pkg, job_ini): out = self.run_calc(pkg.__file__, job_ini, exports='csv', concurrent_tasks='4') # NB: it is important to use concurrent_tasks > 1 to test the # complications of concurrency (for instance the noncommutativity of # numpy.float32 addition when computing the average losses) all_csv = [] for fnames in out.values(): for fname in fnames: if 'rlz' in fname: continue elif fname.endswith('.csv') and any(x in fname for x in ( 'loss_curve', 'loss_map', 'agg_loss', 'avg_loss')): all_csv.append(fname) assert all_csv, 'Could not find any CSV file??' 
for fname in all_csv: self.assertEqualFiles( 'expected/%s' % strip_calc_id(fname), fname) @attr('qa', 'risk', 'event_based_risk') def test_case_1(self): self.run_calc(case_1.__file__, 'job.ini') ekeys = [('agg_curves-stats', 'csv')] for ekey in ekeys: for fname in export(ekey, self.calc.datastore): self.assertEqualFiles( 'expected/%s' % strip_calc_id(fname), fname) # make sure the agg_curves-stats has the right attrs self.check_attr('return_periods', [30, 60, 120, 240, 480, 960]) self.check_attr('units', [b'EUR', b'EUR']) self.check_attr('nbytes', 96) # test the loss curves exporter [f1] = export(('loss_curves/rlz-0', 'csv'), self.calc.datastore) [f2] = export(('loss_curves/rlz-1', 'csv'), self.calc.datastore) self.assertEqualFiles('expected/loss_curves-rlz-000.csv', f1) self.assertEqualFiles('expected/loss_curves-rlz-001.csv', f2) [f] = export(('loss_curves/mean', 'csv'), self.calc.datastore) self.assertEqualFiles('expected/loss_curves-mean.csv', f) # test the loss maps exporter fnames = export(('loss_maps-stats', 'csv'), self.calc.datastore) assert fnames for fname in fnames: self.assertEqualFiles('expected/' + strip_calc_id(fname), fname, delta=1E-5) # test the rup_loss_table exporter fnames = export(('rup_loss_table', 'xml'), self.calc.datastore) self.assertEqual(len(fnames), 2) for fname in fnames: self.assertEqualFiles('expected/' + strip_calc_id(fname), fname) @attr('qa', 'risk', 'event_based_risk') def test_case_1g(self): # vulnerability function with PMF self.run_calc(case_1g.__file__, 'job_h.ini,job_r.ini') [fname] = export(('avg_losses-rlzs', 'csv'), self.calc.datastore) self.assertEqualFiles('expected/avg_losses.csv', fname) os.remove(fname) @attr('qa', 'risk', 'event_based_risk') def test_case_2(self): self.run_calc(case_2.__file__, 'job.ini') fname = gettemp(view('mean_avg_losses', self.calc.datastore)) self.assertEqualFiles('expected/mean_avg_losses.txt', fname) os.remove(fname) # test the composite_risk_model keys (i.e. 
slash escaping) crm = sorted(self.calc.datastore.getitem('composite_risk_model')) self.assertEqual(crm, ['RC%2B', 'RM', 'W%2F1']) # test the case when all GMFs are filtered out with self.assertRaises(RuntimeError) as ctx: self.run_calc(case_2.__file__, 'job.ini', minimum_intensity='10.0') self.assertEqual( str(ctx.exception), 'No GMFs were generated, perhaps they were all below the ' 'minimum_intensity threshold') @attr('qa', 'risk', 'event_based_risk') def test_case_2_sampling(self): self.run_calc(case_2.__file__, 'job_sampling.ini') self.assertEqual(len(self.calc.datastore['events']), 20) # TODO: improve this test @attr('qa', 'risk', 'event_based_risk') def test_case_2_correlation(self): self.run_calc(case_2.__file__, 'job_loss.ini', asset_correlation=1.0) [fname] = export(('agg_loss_table', 'csv'), self.calc.datastore) self.assertEqualFiles('expected/agg_losses.csv', fname) check_total_losses(self.calc) @attr('qa', 'risk', 'event_based_risk') def test_missing_taxonomy(self): with self.assertRaises(RuntimeError) as ctx: self.run_calc(case_2.__file__, 'job_err.ini') self.assertIn('not in the risk model', str(ctx.exception)) @attr('qa', 'risk', 'event_based_risk') def test_case_3(self): # this is a test with statistics and without conditional_loss_poes self.run_calc(case_3.__file__, 'job.ini', exports='csv', concurrent_tasks='4') # test the number of bytes saved in the rupture records nbytes = self.calc.datastore.get_attr('ruptures', 'nbytes') self.assertEqual(nbytes, 1404) # test postprocessing self.calc.datastore.close() hc_id = self.calc.datastore.calc_id self.run_calc(case_3.__file__, 'job.ini', exports='csv', hazard_calculation_id=str(hc_id), concurrent_tasks='0') # avoid hdf5 fork issues [fname] = export(('agg_curves-stats', 'csv'), self.calc.datastore) self.assertEqualFiles('expected/%s' % strip_calc_id(fname), fname) @attr('qa', 'risk', 'event_based_risk') def test_case_4(self): # Turkey with SHARE logic tree self.run_calc(case_4.__file__, 'job.ini') [fname] = export(('avg_losses-stats', 'csv'), self.calc.datastore) self.assertEqualFiles('expected/avg_losses-mean.csv', fname) fnames = export(('agg_loss_table', 'csv'), self.calc.datastore) assert fnames, 'No agg_losses exported??' for fname in fnames: self.assertEqualFiles('expected/' + strip_calc_id(fname), fname) @attr('qa', 'risk', 'event_based_risk') def test_occupants(self): self.run_calc(occupants.__file__, 'job.ini') fnames = export(('agg_curves-rlzs', 'csv'), self.calc.datastore) for fname in fnames: self.assertEqualFiles('expected/' + strip_calc_id(fname), fname, delta=1E-5) fnames = export(('loss_maps-rlzs', 'csv'), self.calc.datastore) assert fnames, 'loss_maps-rlzs not exported?' for fname in fnames: self.assertEqualFiles('expected/' + strip_calc_id(fname), fname, delta=1E-5) @attr('qa', 'risk', 'event_based_risk') def test_case_master(self): if sys.platform == 'darwin': raise unittest.SkipTest('MacOSX') self.run_calc(case_master.__file__, 'job.ini', exports='csv') fnames = export(('avg_losses-stats', 'csv'), self.calc.datastore) assert fnames, 'avg_losses-stats not exported?' for fname in fnames: self.assertEqualFiles('expected/' + strip_calc_id(fname), fname, delta=1E-5) # extract loss_curves/rlz-1 (with the first asset having zero losses) [fname] = export(('loss_curves/rlz-1', 'csv'), self.calc.datastore) self.assertEqualFiles('expected/' + strip_calc_id(fname), fname, delta=1E-5) fnames = export(('loss_maps-rlzs', 'csv'), self.calc.datastore) assert fnames, 'loss_maps-rlzs not exported?' 
for fname in fnames: self.assertEqualFiles('expected/' + strip_calc_id(fname), fname, delta=1E-5) # extract curves by tag tags = 'taxonomy=tax1&state=01&cresta=0.11' a = extract(self.calc.datastore, 'aggcurves/structural?' + tags) self.assertEqual(a.array.shape, (4, 3)) # 4 stats, 3 return periods fname = gettemp(view('portfolio_loss', self.calc.datastore)) self.assertEqualFiles('expected/portfolio_loss.txt', fname, delta=1E-5) os.remove(fname) # check ruptures are stored correctly fname = gettemp(view('ruptures_events', self.calc.datastore)) self.assertEqualFiles('expected/ruptures_events.txt', fname) os.remove(fname) check_total_losses(self.calc) @attr('qa', 'risk', 'event_based_risk') def test_case_miriam(self): # this is a case with a grid and asset-hazard association self.run_calc(case_miriam.__file__, 'job.ini', exports='csv') [fname] = export(('agg_loss_table', 'csv'), self.calc.datastore) self.assertEqualFiles('expected/agg_losses-rlz000-structural.csv', fname, delta=1E-5) fname = gettemp(view('portfolio_loss', self.calc.datastore)) self.assertEqualFiles( 'expected/portfolio_loss.txt', fname, delta=1E-5) os.remove(fname) # this is a case with exposure and region_grid_spacing self.run_calc(case_miriam.__file__, 'job2.ini') hcurves = dict(extract(self.calc.datastore, 'hcurves'))['all'] sitecol = self.calc.datastore['sitecol'] # filtered sitecol self.assertEqual(len(hcurves), len(sitecol)) assetcol = self.calc.datastore['assetcol'] self.assertEqual(len(sitecol), 21) self.assertGreater(sitecol.vs30.sum(), 0) self.assertEqual(len(assetcol), 548) @attr('qa', 'risk', 'event_based_risk') def test_case_7a(self): # case with <insuranceLimit isAbsolute="false"/> # this is also a case with preimported exposure self.run_calc(case_7a.__file__, 'job_h.ini') self.run_calc(case_7a.__file__, 'job_r.ini', hazard_calculation_id=str(self.calc.datastore.calc_id)) [fname] = export(('agg_loss_table', 'csv'), self.calc.datastore) self.assertEqualFiles('expected/agg_losses.csv', fname, delta=1E-5) @attr('qa', 'hazard', 'event_based') def test_case_4_hazard(self): # Turkey with SHARE logic tree; TODO: add site model # it has 8 realizations but 4 of them have 0 ruptures out = self.run_calc(case_4.__file__, 'job.ini', calculation_mode='event_based', ground_motion_fields='false', exports='csv') [f1, f2] = [f for f in out['hcurves', 'csv'] if 'mean' in f] self.assertEqualFiles('expected/hazard_curve-mean-PGA.csv', f1) self.assertEqualFiles('expected/hazard_curve-mean-SA(0.5).csv', f2) [fname] = [f for f in out['hmaps', 'csv'] if 'mean' in f] self.assertEqualFiles('expected/hazard_map-mean.csv', fname) fnames = export(('hmaps', 'xml'), self.calc.datastore) self.assertEqual(len(fnames), 36) # 2 IMT x 2 poes + 32 files @attr('qa', 'hazard', 'event_based') def test_case_4a(self): # the case of a site_model.xml with 7 sites but only 1 asset out = self.run_calc(case_4a.__file__, 'job_hazard.ini', exports='csv') [fname, _sitefile] = out['gmf_data', 'csv'] self.assertEqualFiles('expected/gmf-data.csv', fname) @attr('qa', 'hazard', 'event_based_risk') def test_case_6c(self): # case with asset_correlation=1 self.run_calc(case_6c.__file__, 'job_h.ini') hc = str(self.calc.datastore.calc_id) out = self.run_calc(case_6c.__file__, 'job_r.ini', exports='csv', hazard_calculation_id=hc, concurrent_tasks='0') [fname] = out['avg_losses-rlzs', 'csv'] self.assertEqualFiles('expected/avg_losses.csv', fname, delta=1E-5)
[]
[]
[]
[]
[]
python
0
0
pygimli/physics/em/tdem.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """Time Domain Electromagnetics (TDEM) functions and class""" import sys from math import pi import numpy as np import matplotlib.pyplot as plt import pygimli as pg from . vmd import VMDTimeDomainModelling def rhoafromU(U, t, Tx, current=1.0, Rx=None): r"""Apparent resistivity curve from classical TEM (U or dB/dt) rhoafromU(U/I, t, TXarea[, RXarea]) .. math:: \rho_a = ( A_{Rx} *A_{Tx} * \mu_0 / 20 / (U/I) )^2/3*t^{-5/3}*4e-7 """ UbyI = U / current if Rx is None: Rx = Tx # assume single/coincident loop mu0 = 4e-7 * pi rhoa = (Rx * Tx * mu0 / 20. / UbyI)**(2. / 3.) * \ t**(-5. / 3.) * mu0 / pi return rhoa def rhoafromB(B, t, Tx, current=1): r"""Apparent resistivity from B-field TEM .. math:: \rho_a = ( (A_{Tx}*I*\mu_0 ) / (30B) )^2/3 * 4e-7 / t """ mu0 = 4e-7 * pi rhoa = (current * Tx * mu0 / 30. / B)**(2. / 3.) * mu0 / pi / t return rhoa # TODO: better derive a class TEMsounding from dict and put functions in there def TxArea(snd): """ return effective transmitter area """ if isinstance(snd['LOOP_SIZE'], str): Tx = np.prod([float(a) for a in snd['LOOP_SIZE'].split()]) else: Tx = snd['LOOP_SIZE'] return Tx def RxArea(snd): """Return effective receiver area.""" Rx = 0 # just in case of error if 'COIL_SIZE' in snd: Rx = snd['COIL_SIZE'] if Rx == 700.: Rx = 100. # hack for wrong turns in NMR noise loop else: # no coil size given ==> COI or SIN ==> take loop size Rx = TxArea(snd) return Rx def get_rhoa(snd, cal=260e-9, corrramp=False, verbose=False): """Compute apparent resistivity from sounding (usf) dict.""" Tx = TxArea(snd) Rx = RxArea(snd) if 'COIL_SIZE' in snd: Rx = snd['COIL_SIZE'] else: Rx = Tx if verbose: print("Tx/Rx", Tx, Rx) v = snd['VOLTAGE'] istart, istop = 0, len(v) # default: take all mav = np.arange(len(v))[v == max(v)] if len(mav) > 1: # several equal big ones: start after istart = max(mav) + 1 if min(v) < 0.0: # negative values: stop at first istop = np.argmax(v[20:] < 0.0) + 20 if verbose: print(istart, istop) v = v[istart:istop] if 'ST_DEV' in snd: dv = snd['ST_DEV'][istart:istop] # / snd['CURRENT'] else: dv = v * 0.01 t = snd['TIME'][istart:istop] * 1.0 if corrramp and 'RAMP_TIME' in snd: t = t - snd['RAMP_TIME'] / 2 if Rx == 1: # apparently B-field not dB/dt rhoa = rhoafromB(B=v*cal, t=t, Tx=Tx) else: if verbose: print("Using rhoafromU:", v, t, Tx, Rx) rhoa = rhoafromU(U=v, t=t, Tx=Tx, Rx=Rx) if verbose: print(rhoa[0], rhoa[10], rhoa[-1]) rhoaerr = dv / v * (2. / 3.) 
return rhoa, t, rhoaerr def readusffile(filename, stripnoise=True): """Read data from single USF (universal sounding file) file Examples -------- DATA = readusffile(filename) DATA = readusffile(filename, DATA) will append to DATA """ DATA = [] columns = [] nr = 0 station = {} sounding = {} sounding['FILENAME'] = filename isdata = False fid = open(filename) for line in fid: zeile = line.rstrip('\n').replace(',', ' ') # commas useless here if zeile: # anything at all if zeile[0] == '/': # comment-like if zeile[1:4] == 'END': # end of a sounding if isdata: # already read some data sounding['data'] = columns for i, cn in enumerate(sounding['column_names']): sounding[cn] = columns[:, i] sounding['FILENAME'] = filename if 'INSTRUMENT' in sounding and 'ST_DEV' in sounding: if 'terraTEM' in sounding['INSTRUMENT']: sounding['ST_DEV'] *= 0.01 print('taking default stdev') sounding.update(station) if not(stripnoise and 'SWEEP_IS_NOISE' in sounding and sounding['SWEEP_IS_NOISE'] == 1): DATA.append(sounding) sounding = {} isdata = not isdata # turn off data mode elif zeile.find(':') > 0: # key-value pair key, value = zeile[1:].split(':') try: val = float(value) sounding[key] = val except: sounding[key] = value if 'SWEEP' in key and len(station) == 0: # first sweep station = sounding.copy() # save global settings else: if isdata: values = zeile.split() try: for i, v in enumerate(values): columns[nr, i] = float(v) nr += 1 except: sounding['column_names'] = values columns = np.zeros((int(sounding['POINTS']), len(values))) nr = 0 fid.close() return DATA def readusffiles(filenames): """Read all soundings data from a list of usf files Example ------- DATA = readusffiles(filenames) """ from glob import glob if isinstance(filenames, str): if filenames.find('*') >= 0: filenames = glob(filenames) else: filenames = [filenames] DATA = [] for onefile in filenames: DATA.extend(readusffile(onefile)) return DATA def readTEMfastFile(temfile): """ReadTEMfastFile(filename) reads TEM-fast file into usf sounding.""" snd = {} snd['FILENAME'] = temfile fid = open(temfile) for i in range(4): zeile = fid.readline() snd['STACK_SIZE'] = int(zeile.split()[3]) snd['RAMP_TIME'] = float(zeile.split()[5])*1e-6 snd['CURRENT'] = float(zeile.split()[7][2:]) zeile = fid.readline() fid.close() snd['LOOP_SIZE'] = float(zeile.split()[2])**2 snd['COIL_SIZE'] = float(zeile.split()[5])**2 t, v, e, r = np.loadtxt(temfile, skiprows=8, usecols=(1, 2, 3, 4), unpack=True) ind = np.nonzero((r > 0) * (v > 0) * (t > snd['RAMP_TIME']*1.2e6)) # us snd['TIME'] = t[ind] * 1e-6 # us snd['VOLTAGE'] = v[ind] snd['ST_DEV'] = e[ind] snd['RHOA'] = r[ind] return snd def readUniKTEMData(filename): """Read TEM data format of University of Cologne.""" if '*' in filename: from glob import glob allfiles = glob(filename) else: allfiles = [filename] DATA = [] for filename in allfiles: snd = {} snd['FILENAME'] = filename A = np.loadtxt(filename) snd['TIME'] = A[:, 1] snd['VOLTAGE'] = A[:, 2] snd['ST_DEV'] = A[:, 4] / 100 * A[:, 2] DATA.append(snd) return DATA def readSiroTEMData(fname): """Read TEM data from siroTEM instrument dump. Example ------- DATA = readSiroTEMData(filename) .. 
list of soundings with USF and siro-specific keys """ Time_ST = np.array([487., 887., 1287., 1687., 2087., 2687., 3487., 4287., 5087., 5887., 7087., 8687., 10287., 11887., 13487., 15887., 19087., 22287., 25487., 28687., 33487., 39887., 46287., 52687., 59087., 68687., 81487., 94287., 107090., 119890., 139090., 164690., 190290., 215890., 241490., 279890., 331090., 382290., 433490., 484690., 561490., 663890., 766290., 868690., 971090., 1124700., 1329500., 1534300., 1739100., 1943900.]) Time_ET = np.array([0.05, 0.1, 0.15, 0.25, 0.325, 0.425, 0.525, 0.625, 0.725, 0.875, 1.075, 1.275, 1.475, 1.675, 1.975, 2.375, 2.775, 3.175, 3.575, 4.175, 4.975, 5.775, 6.575, 7.375, 8.575, 10.175, 11.775, 13.375, 14.975, 17.375, 20.575, 23.775, 26.975, 30.175, 34.975, 41.375, 47.775, 54.175, 60.574, 70.175, 82.975, 95.775, 108.575, 121.375, 140.575, 166.175, 191.775, 217.375, 242.975, 281.375, 332.575]) fid = open(fname) # read in file header until : sign line = 'a' while len(line) > 0 and line[0] != ':': line = fid.readline() DATA = [] line = fid.readline() while line[0] != ';': header = line[1:-6].split(',') snd = {} # dictionary, uppercase corresponds to USF format keys snd['INSTRUMENT'] = 'siroTEM' snd['dtype'] = int(header[3]) dstring = header[1] snd['DATE'] = int('20' + dstring[6:8] + dstring[3:4] + dstring[0:1]) snd['win0'], snd['win1'], ngain, snd['conf'], snd['nch'] = \ [int(h) for h in header[5:10]] snd['SOUNDING_NUMBER'] = int(header[10]) snd['GAIN_FACTOR'] = [0.1, 1.0, 10.0, 100.0][ngain] # predefined gains snd['STACK_SIZE'] = int(header[14]) snd['ttype'] = int(header[20]) # 1=composite, 2=earlytime, 3=standard, 4=highresolution snd['CURRENT'] = float(header[17]) snd['RAMP_TIME'] = float(header[18]) * 1e-6 snd['TIME_DELAY'] = float(header[19]) snd['LOOP_SIZE'] = float(header[21]) snd['COIL_SIZE'] = float(header[22]) fid.readline() data = [] line = fid.readline()[:-1] # trim CR+LF newline while len(line) > 0: while line[-1] == '/': line = line[:-1] + fid.readline()[:-1].replace('\t', '') # aline = line nums = [float(el[-7:-2]) * 10**(float(el[-2:])) for el in line[1:-5].split(',')[1:]] data.append(np.array(nums)) line = fid.readline().rstrip('\n').rstrip('\r') snd['VOLTAGE'] = data[0] if snd['ttype'] == 2: # early time snd['TIME'] = Time_ET[snd['win0'] - 1:snd['win1']] * 1e-3 if snd['ttype'] == 3: # standard time snd['TIME'] = Time_ST[snd['win0'] - 1:snd['win1']] * 1e-6 snd['ST_DEV'] = data[1] if snd['dtype'] > 0: # normal measurement DATA.append(snd) line = fid.readline() fid.close() # DATA['FILENAME'] = fname # makes no sense as DATA is an array->snd? 
    return DATA


def getname(snd):
    """Generate label name from filename entry."""
    fname = snd['FILENAME']
    name = fname[fname.rfind('\\')+1:-4]
    if 'STACK_SIZE' in snd:
        name += '-' + str(int(snd['STACK_SIZE']))
    return name


class TDEM():
    """TEM class mainly for holding data etc."""

    def __init__(self, filename=None):
        """Initialize class and (optionally) load data."""
        self.DATA = []
        self.names = []
        if filename:
            self.load(filename)

    def load(self, filename):
        """Read data from usf, txt (siroTEM), tem (TEMfast) or UniK file."""
        if filename.lower().endswith('.usf'):
            self.DATA.extend(readusffiles(filename))
        elif filename.lower().endswith('.txt'):
            self.DATA = readSiroTEMData(filename)
        elif filename.lower().endswith('.tem'):
            self.DATA.append(readTEMfastFile(filename))
        elif filename.lower().endswith('.dat'):  # dangerous
            self.DATA = readUniKTEMData(filename)

    def __repr__(self):
        return "<TDEMdata: %d soundings>" % (len(self.DATA))

    def showInfos(self):  # only for old scripts using it
        print(self.__repr__())

    def plotTransients(self, ax=None, **kwargs):
        """Plot all transients into one window."""
        if ax is None:
            fig, ax = plt.subplots()
        else:
            fig = ax.get_figure()

        kwargs.setdefault('marker', '.')
        plotlegend = kwargs.pop('legend', True)
        cols = 'rgbmcyk'
        pl = []
        for i, data in enumerate(self.DATA):
            t = data['TIME']
            u = data['VOLTAGE'] / RxArea(data)
            col = cols[i % len(cols)]
            pl.append(ax.loglog(t, u, label=getname(data), color=col,
                                **kwargs))
            if 'ST_DEV' in data:
                err = data['ST_DEV'] / RxArea(data)
                ax.errorbar(t, u, yerr=err, color=col)
            if 'RAMP_TIME' in data:
                ax.vlines(data['RAMP_TIME'], min(u), max(u), colors=col)

        ax.set_xlabel('t [s]')
        ax.set_ylabel('U/I [V/A]')
        if plotlegend:
            ax.legend(loc='best')

        ax.grid(True)
        return fig, ax

    def plotRhoa(self, ax=None, ploterror=False, corrramp=False, **kwargs):
        """Plot all apparent resistivity curves into one window."""
        if ax is None:
            fig, ax = plt.subplots()

        kwargs.setdefault('marker', '.')
        plotLegend = kwargs.pop('legend', True)
        for i, data in enumerate(self.DATA):
            rhoa, t, err = get_rhoa(data, corrramp=corrramp)
            err[err > .99] = .99
            col = 'C' + str(i % 10)
            ax.loglog(rhoa, t, label=getname(data), color=col, **kwargs)
            if ploterror:
                ax.errorbar(rhoa, t, xerr=rhoa * err, color=col)

        ax.set_ylabel('t [s]')
        ax.set_xlabel(r'$\rho_a$ [$\Omega$m]')
        if plotLegend:
            ax.legend(loc='best')

        ax.grid(True)
        ax.set_ylim(ax.get_ylim()[::-1])
        return ax

    def __call__(self, i=0):
        """Return a single sounding."""
        return self.DATA[i]

    def getFOP(self, nr=0):
        """Return forward operator for sounding number nr."""
        snd = self.DATA[nr]
        return VMDTimeDomainModelling(snd['TIME'], TxArea(snd), 1)  # RxArea(snd))
        # return VMDTimeDomainModelling(snd['TIME'], TxArea(snd), RxArea(snd))

    def invert(self, nr=0, nlay=4, thickness=None):
        """Do inversion."""
        self.fop = self.getFOP(nr)
        snd = self.DATA[nr]
        rhoa, t, err = get_rhoa(snd)
        self.fop.t = t
        model = self.fop.createStartModel(rhoa, nlay, thickness=thickness)
        self.INV = pg.frameworks.MarquardtInversion(fop=self.fop)
        # legacy API:
        # self.INV = pg.Inversion(rhoa, self.fop)
        # self.INV.setMarquardtScheme(0.9)
        # self.INV.setModel(model)
        # self.INV.setLambda(1000)
        # self.INV.setRelativeError(snd.pop('ST_DEV', 0)/snd['VOLTAGE']+0.03)
        errorVals = snd.pop('ST_DEV', 0) / snd['VOLTAGE'] + 0.03
        self.model = self.INV.run(dataVals=rhoa, errorVals=errorVals,
                                  startModel=model)
        return self.model

    def stackAll(self, tmin=0, tmax=100):
        """Stack all measurements yielding a new TDEM class instance."""
        t = self.DATA[0]['TIME']
        v = np.zeros_like(t)
        V = np.zeros((len(v), len(self.DATA)))
        sumstacks = 0
        for i, snd in enumerate(self.DATA):
            if np.allclose(snd['TIME'], t):
                stacks = snd.pop('STACK_SIZE', 1)
                v += snd['VOLTAGE'] * stacks
                sumstacks += stacks
                V[:, i] = snd['VOLTAGE']
            else:
                print("sounding {} does not have the same time!".format(i))

        v /= sumstacks
        VM = np.ma.masked_less_equal(V, 0)
        err = np.std(VM, axis=1).data
        snd = self.DATA[0].copy()
        fi = np.nonzero((t >= tmin) & (t <= tmax))[0]
        snd['TIME'] = t[fi]
        snd['VOLTAGE'] = v[fi]
        snd['ST_DEV'] = err[fi]
        snd.pop('data', None)  # drop any raw data block carried over from the reader
        tem = TDEM()
        tem.DATA = [snd]
        return tem


if __name__ == '__main__':
    print("do some tests here")
    tem = TDEM(sys.argv[1])
    print(tem)
    tem.plotTransients()
    tem.plotRhoa()
    plt.show()
[]
[]
[]
[]
[]
python
null
null
null
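A short usage sketch of the TDEM class from the Python file above may help tie its pieces together. It is illustrative only: the module name tdem and the input file sounding.txt are assumptions, and it presumes numpy, matplotlib and pygimli are available as in the original file.

# Hypothetical usage sketch for the TDEM class shown above.
# 'tdem' as module name and 'sounding.txt' as input file are assumptions.
import matplotlib.pyplot as plt

from tdem import TDEM  # the file containing readSiroTEMData and TDEM

tem = TDEM('sounding.txt')          # .txt files are routed to readSiroTEMData()
print(tem)                          # e.g. "<TDEMdata: 3 soundings>"

fig, ax = tem.plotTransients()      # voltage decays, log-log, one curve per sounding
tem.plotRhoa(ploterror=True)        # apparent resistivity vs. time with error bars

model = tem.invert(nr=0, nlay=3)    # Marquardt inversion of the first sounding
print(model)
plt.show()

If several repeated soundings were recorded, stackAll() could be called first to average them into a single, less noisy transient before inversion.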
internal/log.go
package internal import ( "os" ) var IsDebug = GetOkOnce(func() bool { debug := os.Getenv("debug") return debug == "true" || debug == "t" || debug == "1" || debug == "ok" }) var IsSkipErrorFile = GetOkOnce(func() bool { skipErrorFile := os.Getenv("skip_error_file") return skipErrorFile == "true" || skipErrorFile == "t" || skipErrorFile == "1" || skipErrorFile == "ok" }) func InitDebug() { Wrap(os.Setenv("debug", "true"), "set debug env error") } func InitSkipErrorFile() { Wrap(os.Setenv("skip_error_file", "true"), "set debug env error") }
[ "\"debug\"", "\"skip_error_file\"" ]
[]
[ "debug", "skip_error_file" ]
[]
["debug", "skip_error_file"]
go
2
0
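The Go file internal/log.go above treats an environment variable as an on/off switch when its value is one of "true", "t", "1" or "ok", and evaluates that check only once. For illustration, a hypothetical Python sketch of the same pattern is given below; env_flag and the module-level constants are invented names and are not part of the Go package.

# Hypothetical Python sketch of the env-flag pattern used in internal/log.go.
# The names below are illustrative; only the accepted values mirror the Go code.
import os

_TRUTHY = ("true", "t", "1", "ok")  # same strings the Go code accepts

def env_flag(name):
    """Return True if the environment variable equals one of the truthy strings."""
    return os.getenv(name, "") in _TRUTHY

# Evaluated once at import time, similar in spirit to GetOkOnce in the Go code.
IS_DEBUG = env_flag("debug")
IS_SKIP_ERROR_FILE = env_flag("skip_error_file")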
plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package com.cloud.hypervisor.xenserver.resource; import java.io.BufferedReader; import java.io.BufferedWriter; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStreamReader; import java.io.OutputStreamWriter; import java.net.MalformedURLException; import java.net.URI; import java.net.URISyntaxException; import java.net.URL; import java.net.URLConnection; import java.nio.charset.Charset; import java.util.ArrayList; import java.util.Date; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Properties; import java.util.Queue; import java.util.Random; import java.util.Set; import java.util.UUID; import java.util.concurrent.TimeoutException; import javax.naming.ConfigurationException; import javax.xml.parsers.DocumentBuilderFactory; import javax.xml.parsers.ParserConfigurationException; import org.apache.cloudstack.storage.to.TemplateObjectTO; import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.MapUtils; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.BooleanUtils; import org.apache.log4j.Logger; import org.apache.xmlrpc.XmlRpcException; import org.joda.time.Duration; import org.w3c.dom.Document; import org.w3c.dom.Node; import org.w3c.dom.NodeList; import org.xml.sax.InputSource; import org.xml.sax.SAXException; import com.cloud.agent.IAgentControl; import com.cloud.agent.api.Answer; import com.cloud.agent.api.Command; import com.cloud.agent.api.GetHostStatsCommand; import com.cloud.agent.api.GetVmStatsCommand; import com.cloud.agent.api.HostStatsEntry; import com.cloud.agent.api.HostVmStateReportEntry; import com.cloud.agent.api.PingCommand; import com.cloud.agent.api.PingRoutingCommand; import com.cloud.agent.api.PingRoutingWithNwGroupsCommand; import com.cloud.agent.api.PingRoutingWithOvsCommand; import com.cloud.agent.api.RebootAnswer; import com.cloud.agent.api.RebootCommand; import com.cloud.agent.api.SetupGuestNetworkCommand; import com.cloud.agent.api.StartAnswer; import com.cloud.agent.api.StartCommand; import com.cloud.agent.api.StartupCommand; import com.cloud.agent.api.StartupRoutingCommand; import com.cloud.agent.api.StartupStorageCommand; import com.cloud.agent.api.StopAnswer; import com.cloud.agent.api.StopCommand; import com.cloud.agent.api.StoragePoolInfo; import com.cloud.agent.api.VgpuTypesInfo; import com.cloud.agent.api.VmStatsEntry; import com.cloud.agent.api.routing.IpAssocCommand; import com.cloud.agent.api.routing.IpAssocVpcCommand; import 
com.cloud.agent.api.routing.NetworkElementCommand; import com.cloud.agent.api.routing.SetNetworkACLCommand; import com.cloud.agent.api.routing.SetSourceNatCommand; import com.cloud.agent.api.to.DataStoreTO; import com.cloud.agent.api.to.DataTO; import com.cloud.agent.api.to.DiskTO; import com.cloud.agent.api.to.GPUDeviceTO; import com.cloud.agent.api.to.IpAddressTO; import com.cloud.agent.api.to.NfsTO; import com.cloud.agent.api.to.NicTO; import com.cloud.agent.api.to.VirtualMachineTO; import com.cloud.agent.resource.virtualnetwork.VRScripts; import com.cloud.agent.resource.virtualnetwork.VirtualRouterDeployer; import com.cloud.agent.resource.virtualnetwork.VirtualRoutingResource; import com.cloud.exception.InternalErrorException; import com.cloud.host.Host.Type; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.hypervisor.xenserver.resource.wrapper.xenbase.CitrixRequestWrapper; import com.cloud.hypervisor.xenserver.resource.wrapper.xenbase.XenServerUtilitiesHelper; import com.cloud.network.Networks; import com.cloud.network.Networks.BroadcastDomainType; import com.cloud.network.Networks.TrafficType; import com.cloud.resource.ServerResource; import com.cloud.resource.hypervisor.HypervisorResource; import com.cloud.storage.Storage; import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.Volume; import com.cloud.storage.VolumeVO; import com.cloud.storage.resource.StorageSubsystemCommandHandler; import com.cloud.storage.resource.StorageSubsystemCommandHandlerBase; import com.cloud.template.VirtualMachineTemplate.BootloaderType; import com.cloud.utils.ExecutionResult; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.PropertiesUtil; import com.cloud.utils.StringUtils; import com.cloud.utils.Ternary; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; import com.cloud.utils.script.Script; import com.cloud.utils.ssh.SSHCmdHelper; import com.cloud.utils.ssh.SshHelper; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachine.PowerState; import com.trilead.ssh2.SCPClient; import com.xensource.xenapi.Bond; import com.xensource.xenapi.Connection; import com.xensource.xenapi.Console; import com.xensource.xenapi.Host; import com.xensource.xenapi.HostCpu; import com.xensource.xenapi.HostMetrics; import com.xensource.xenapi.Network; import com.xensource.xenapi.PBD; import com.xensource.xenapi.PIF; import com.xensource.xenapi.Pool; import com.xensource.xenapi.SR; import com.xensource.xenapi.Session; import com.xensource.xenapi.Task; import com.xensource.xenapi.Types; import com.xensource.xenapi.Types.BadServerResponse; import com.xensource.xenapi.Types.VmPowerState; import com.xensource.xenapi.Types.XenAPIException; import com.xensource.xenapi.VBD; import com.xensource.xenapi.VDI; import com.xensource.xenapi.VIF; import com.xensource.xenapi.VLAN; import com.xensource.xenapi.VM; import com.xensource.xenapi.XenAPIObject; /** * CitrixResourceBase encapsulates the calls to the XenServer Xapi process to * perform the required functionalities for CloudStack. * * ==============> READ THIS <============== Because the XenServer objects can * expire when the session expires, we cannot keep any of the actual XenServer * objects in this class. The only thing that is constant is the UUID of the * XenServer objects but not the objects themselves! This is very important * before you do any changes in this code here. 
* */ public abstract class CitrixResourceBase implements ServerResource, HypervisorResource, VirtualRouterDeployer { /** * used to describe what type of resource a storage device is of */ public enum SRType { EXT, ISO, LVM, LVMOHBA, LVMOISCSI, /** * used for resigning metadata (like SR UUID and VDI UUID when a * particular storage manager is installed on a XenServer host (for back-end snapshots to work)) */ RELVMOISCSI, NFS; String _str; private SRType() { _str = super.toString().toLowerCase(); } public boolean equals(final String type) { return _str.equalsIgnoreCase(type); } @Override public String toString() { return _str; } } private final static int BASE_TO_CONVERT_BYTES_INTO_KILOBYTES = 1024; private static final XenServerConnectionPool ConnPool = XenServerConnectionPool.getInstance(); // static min values for guests on xenserver private static final long mem_128m = 134217728L; static final Random Rand = new Random(System.currentTimeMillis()); private static final Logger s_logger = Logger.getLogger(CitrixResourceBase.class); protected static final HashMap<VmPowerState, PowerState> s_powerStatesTable; private String xenServer70plusGuestToolsName = "guest-tools.iso"; private String xenServerBefore70GuestToolsName = "xs-tools.iso"; static { s_powerStatesTable = new HashMap<VmPowerState, PowerState>(); s_powerStatesTable.put(VmPowerState.HALTED, PowerState.PowerOff); s_powerStatesTable.put(VmPowerState.PAUSED, PowerState.PowerOff); s_powerStatesTable.put(VmPowerState.RUNNING, PowerState.PowerOn); s_powerStatesTable.put(VmPowerState.SUSPENDED, PowerState.PowerOff); s_powerStatesTable.put(VmPowerState.UNRECOGNIZED, PowerState.PowerUnknown); } private static PowerState convertToPowerState(final VmPowerState ps) { final PowerState powerState = s_powerStatesTable.get(ps); return powerState == null ? 
PowerState.PowerUnknown : powerState; } private static boolean isAlienVm(final VM vm, final Connection conn) throws XenAPIException, XmlRpcException { // TODO : we need a better way to tell whether or not the VM belongs to // CloudStack final String vmName = vm.getNameLabel(conn); if (vmName.matches("^[ivs]-\\d+-.+")) { return false; } return true; } protected IAgentControl _agentControl; protected boolean _canBridgeFirewall = false; protected String _cluster; // Guest and Host Performance Statistics protected String _consolidationFunction = "AVERAGE"; protected long _dcId; protected String _guestNetworkName; protected int _heartbeatInterval = 60; protected int _heartbeatTimeout = 120; protected XsHost _host = new XsHost(); protected String _instance; // instance name (default is usually "VM") protected boolean _isOvs = false; protected String _linkLocalPrivateNetworkName; protected int _maxNics = 7; final int _maxWeight = 256; protected int _migratewait; protected String _name; protected Queue<String> _password = new LinkedList<String>(); protected String _pod; protected int _pollingIntervalInSeconds = 60; protected String _privateNetworkName; protected String _publicNetworkName; protected final int _retry = 100; protected boolean _securityGroupEnabled; protected final int _sleep = 10000; protected String _storageNetworkName1; protected String _storageNetworkName2; protected List<VIF> _tmpDom0Vif = new ArrayList<VIF>(); protected String _username; protected VirtualRoutingResource _vrResource; protected String _configDriveIsopath = "/opt/xensource/packages/configdrive_iso/"; protected String _configDriveSRName = "ConfigDriveISOs"; public String _attachIsoDeviceNum = "3"; protected XenServerUtilitiesHelper xenServerUtilitiesHelper = new XenServerUtilitiesHelper(); protected int _wait; // Hypervisor specific params with generic value, may need to be overridden // for specific versions long _xsMemoryUsed = 128 * 1024 * 1024L; // xenserver hypervisor used 128 M double _xsVirtualizationFactor = 63.0 / 64.0; // 1 - virtualization overhead protected StorageSubsystemCommandHandler storageHandler; private static final String XENSTORE_DATA_IP = "vm-data/ip"; private static final String XENSTORE_DATA_GATEWAY = "vm-data/gateway"; private static final String XENSTORE_DATA_NETMASK = "vm-data/netmask"; private static final String XENSTORE_DATA_CS_INIT = "vm-data/cloudstack/init"; public CitrixResourceBase() { } /** * Replaces the old password with the new password used to connect to the host. * * @param password - the new host password. * @return the old password. */ public String replaceOldPasswdInQueue(final String password) { final String oldPasswd = _password.poll(); _password.add(password); return oldPasswd; } public String getPwdFromQueue() { return _password.peek(); } public XenServerUtilitiesHelper getXenServerUtilitiesHelper() { return xenServerUtilitiesHelper; } protected StorageSubsystemCommandHandler buildStorageHandler() { final XenServerStorageProcessor processor = new XenServerStorageProcessor(this); return new StorageSubsystemCommandHandlerBase(processor); } public String callHostPlugin(final Connection conn, final String plugin, final String cmd, final String... 
params) { final Map<String, String> args = new HashMap<String, String>(); String msg; try { for (int i = 0; i < params.length; i += 2) { args.put(params[i], params[i + 1]); } if (s_logger.isTraceEnabled()) { s_logger.trace("callHostPlugin executing for command " + cmd + " with " + getArgsString(args)); } final Host host = Host.getByUuid(conn, _host.getUuid()); final String result = host.callPlugin(conn, plugin, cmd, args); if (s_logger.isTraceEnabled()) { s_logger.trace("callHostPlugin Result: " + result); } return result.replace("\n", ""); } catch (final XenAPIException e) { msg = "callHostPlugin failed for cmd: " + cmd + " with args " + getArgsString(args) + " due to " + e.toString(); s_logger.warn(msg); } catch (final XmlRpcException e) { msg = "callHostPlugin failed for cmd: " + cmd + " with args " + getArgsString(args) + " due to " + e.getMessage(); s_logger.debug(msg); } throw new CloudRuntimeException(msg); } protected String callHostPluginAsync(final Connection conn, final String plugin, final String cmd, final int wait, final Map<String, String> params) { final int timeout = wait * 1000; final Map<String, String> args = new HashMap<String, String>(); Task task = null; try { for (final Map.Entry<String, String> entry : params.entrySet()) { args.put(entry.getKey(), entry.getValue()); } if (s_logger.isTraceEnabled()) { s_logger.trace("callHostPlugin executing for command " + cmd + " with " + getArgsString(args)); } final Host host = Host.getByUuid(conn, _host.getUuid()); task = host.callPluginAsync(conn, plugin, cmd, args); // poll every 1 seconds waitForTask(conn, task, 1000, timeout); checkForSuccess(conn, task); final String result = task.getResult(conn); if (s_logger.isTraceEnabled()) { s_logger.trace("callHostPlugin Result: " + result); } return result.replace("<value>", "").replace("</value>", "").replace("\n", ""); } catch (final Types.HandleInvalid e) { s_logger.warn("callHostPlugin failed for cmd: " + cmd + " with args " + getArgsString(args) + " due to HandleInvalid clazz:" + e.clazz + ", handle:" + e.handle); } catch (final Exception e) { s_logger.warn("callHostPlugin failed for cmd: " + cmd + " with args " + getArgsString(args) + " due to " + e.toString(), e); } finally { if (task != null) { try { task.destroy(conn); } catch (final Exception e1) { s_logger.debug("unable to destroy task(" + task.toString() + ") on host(" + _host.getUuid() + ") due to " + e1.toString()); } } } return null; } protected String callHostPluginAsync(final Connection conn, final String plugin, final String cmd, final int wait, final String... 
params) { final int timeout = wait * 1000; final Map<String, String> args = new HashMap<String, String>(); Task task = null; try { for (int i = 0; i < params.length; i += 2) { args.put(params[i], params[i + 1]); } if (s_logger.isTraceEnabled()) { s_logger.trace("callHostPlugin executing for command " + cmd + " with " + getArgsString(args)); } final Host host = Host.getByUuid(conn, _host.getUuid()); task = host.callPluginAsync(conn, plugin, cmd, args); // poll every 1 seconds waitForTask(conn, task, 1000, timeout); checkForSuccess(conn, task); final String result = task.getResult(conn); if (s_logger.isTraceEnabled()) { s_logger.trace("callHostPlugin Result: " + result); } return result.replace("<value>", "").replace("</value>", "").replace("\n", ""); } catch (final Types.HandleInvalid e) { s_logger.warn("callHostPlugin failed for cmd: " + cmd + " with args " + getArgsString(args) + " due to HandleInvalid clazz:" + e.clazz + ", handle:" + e.handle); } catch (final XenAPIException e) { s_logger.warn("callHostPlugin failed for cmd: " + cmd + " with args " + getArgsString(args) + " due to " + e.toString(), e); } catch (final Exception e) { s_logger.warn("callHostPlugin failed for cmd: " + cmd + " with args " + getArgsString(args) + " due to " + e.getMessage(), e); } finally { if (task != null) { try { task.destroy(conn); } catch (final Exception e1) { s_logger.debug("unable to destroy task(" + task.toString() + ") on host(" + _host.getUuid() + ") due to " + e1.toString()); } } } return null; } public String callHostPluginPremium(final Connection conn, final String cmd, final String... params) { return callHostPlugin(conn, "vmopspremium", cmd, params); } protected String callHostPluginThroughMaster(final Connection conn, final String plugin, final String cmd, final String... 
params) { final Map<String, String> args = new HashMap<String, String>(); try { final Map<Pool, Pool.Record> poolRecs = Pool.getAllRecords(conn); if (poolRecs.size() != 1) { throw new CloudRuntimeException("There are " + poolRecs.size() + " pool for host :" + _host.getUuid()); } final Host master = poolRecs.values().iterator().next().master; for (int i = 0; i < params.length; i += 2) { args.put(params[i], params[i + 1]); } if (s_logger.isTraceEnabled()) { s_logger.trace("callHostPlugin executing for command " + cmd + " with " + getArgsString(args)); } final String result = master.callPlugin(conn, plugin, cmd, args); if (s_logger.isTraceEnabled()) { s_logger.trace("callHostPlugin Result: " + result); } return result.replace("\n", ""); } catch (final Types.HandleInvalid e) { s_logger.warn("callHostPlugin failed for cmd: " + cmd + " with args " + getArgsString(args) + " due to HandleInvalid clazz:" + e.clazz + ", handle:" + e.handle); } catch (final XenAPIException e) { s_logger.warn("callHostPlugin failed for cmd: " + cmd + " with args " + getArgsString(args) + " due to " + e.toString(), e); } catch (final XmlRpcException e) { s_logger.warn("callHostPlugin failed for cmd: " + cmd + " with args " + getArgsString(args) + " due to " + e.getMessage(), e); } return null; } public boolean canBridgeFirewall() { return _canBridgeFirewall; } public boolean canBridgeFirewall(final Connection conn) { return Boolean.valueOf(callHostPlugin(conn, "vmops", "can_bridge_firewall", "host_uuid", _host.getUuid(), "instance", _instance)); } public void checkForSuccess(final Connection c, final Task task) throws XenAPIException, XmlRpcException { if (task.getStatus(c) == Types.TaskStatusType.SUCCESS) { if (s_logger.isTraceEnabled()) { s_logger.trace("Task " + task.getNameLabel(c) + " (" + task.getUuid(c) + ") completed"); } return; } else { final String msg = "Task failed! 
Task record: " + task.getRecord(c); s_logger.warn(msg); task.cancel(c); task.destroy(c); throw new Types.BadAsyncResult(msg); } } protected boolean checkSR(final Connection conn, final SR sr) { try { final SR.Record srr = sr.getRecord(conn); final Set<PBD> pbds = sr.getPBDs(conn); if (pbds.size() == 0) { final String msg = "There is no PBDs for this SR: " + srr.nameLabel + " on host:" + _host.getUuid(); s_logger.warn(msg); return false; } if (s_logger.isDebugEnabled()) { s_logger.debug("Checking " + srr.nameLabel + " or SR " + srr.uuid + " on " + _host); } if (srr.shared) { if (SRType.NFS.equals(srr.type)) { final Map<String, String> smConfig = srr.smConfig; if (!smConfig.containsKey("nosubdir")) { smConfig.put("nosubdir", "true"); sr.setSmConfig(conn, smConfig); } } final Host host = Host.getByUuid(conn, _host.getUuid()); boolean found = false; for (final PBD pbd : pbds) { final PBD.Record pbdr = pbd.getRecord(conn); if (host.equals(pbdr.host)) { if (!pbdr.currentlyAttached) { pbdPlug(conn, pbd, pbdr.uuid); } found = true; break; } } if (!found) { final PBD.Record pbdr = srr.PBDs.iterator().next().getRecord(conn); pbdr.host = host; pbdr.uuid = ""; final PBD pbd = PBD.create(conn, pbdr); pbdPlug(conn, pbd, pbd.getUuid(conn)); } } else { for (final PBD pbd : pbds) { final PBD.Record pbdr = pbd.getRecord(conn); if (!pbdr.currentlyAttached) { pbdPlug(conn, pbd, pbdr.uuid); } } } } catch (final Exception e) { final String msg = "checkSR failed host:" + _host + " due to " + e.toString(); s_logger.warn(msg, e); return false; } return true; } private void CheckXenHostInfo() throws ConfigurationException { final Connection conn = ConnPool.getConnect(_host.getIp(), _username, _password); if (conn == null) { throw new ConfigurationException("Can not create connection to " + _host.getIp()); } try { Host.Record hostRec = null; try { final Host host = Host.getByUuid(conn, _host.getUuid()); hostRec = host.getRecord(conn); final Pool.Record poolRec = Pool.getAllRecords(conn).values().iterator().next(); _host.setPool(poolRec.uuid); } catch (final Exception e) { throw new ConfigurationException("Can not get host information from " + _host.getIp()); } if (!hostRec.address.equals(_host.getIp())) { final String msg = "Host " + _host.getIp() + " seems be reinstalled, please remove this host and readd"; s_logger.error(msg); throw new ConfigurationException(msg); } } finally { try { Session.logout(conn); } catch (final Exception e) { } } } @Override public ExecutionResult cleanupCommand(final NetworkElementCommand cmd) { if (cmd instanceof IpAssocCommand && !(cmd instanceof IpAssocVpcCommand)) { return cleanupNetworkElementCommand((IpAssocCommand)cmd); } return new ExecutionResult(true, null); } public boolean cleanupHaltedVms(final Connection conn) throws XenAPIException, XmlRpcException { final Host host = Host.getByUuid(conn, _host.getUuid()); final Map<VM, VM.Record> vms = VM.getAllRecords(conn); boolean success = true; if (vms != null && !vms.isEmpty()) { for (final Map.Entry<VM, VM.Record> entry : vms.entrySet()) { final VM vm = entry.getKey(); final VM.Record vmRec = entry.getValue(); if (vmRec.isATemplate || vmRec.isControlDomain) { continue; } if (VmPowerState.HALTED.equals(vmRec.powerState) && vmRec.affinity.equals(host) && !isAlienVm(vm, conn)) { try { vm.destroy(conn); } catch (final Exception e) { s_logger.warn("Catch Exception " + e.getClass().getName() + ": unable to destroy VM " + vmRec.nameLabel + " due to ", e); success = false; } } } } return success; } protected ExecutionResult 
cleanupNetworkElementCommand(final IpAssocCommand cmd) { final Connection conn = getConnection(); final String routerName = cmd.getAccessDetail(NetworkElementCommand.ROUTER_NAME); final String routerIp = cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP); final String lastIp = cmd.getAccessDetail(NetworkElementCommand.NETWORK_PUB_LAST_IP); try { final IpAddressTO[] ips = cmd.getIpAddresses(); for (final IpAddressTO ip : ips) { final VM router = getVM(conn, routerName); final NicTO nic = new NicTO(); nic.setMac(ip.getVifMacAddress()); nic.setType(ip.getTrafficType()); if (ip.getBroadcastUri() == null) { nic.setBroadcastType(BroadcastDomainType.Native); } else { final URI uri = BroadcastDomainType.fromString(ip.getBroadcastUri()); nic.setBroadcastType(BroadcastDomainType.getSchemeValue(uri)); nic.setBroadcastUri(uri); } nic.setDeviceId(0); nic.setNetworkRateMbps(ip.getNetworkRate()); nic.setName(ip.getNetworkName()); Network network = getNetwork(conn, nic); // If we are disassociating the last IP address in the VLAN, we // need // to remove a VIF boolean removeVif = false; // there is only one ip in this public vlan and removing it, so // remove the nic if (org.apache.commons.lang.StringUtils.equalsIgnoreCase(lastIp, "true") && !ip.isAdd()) { final VIF correctVif = getCorrectVif(conn, router, network); // in isolated network eth2 is the default public interface. We don't want to delete it. if (correctVif != null && !correctVif.getDevice(conn).equals("2")) { removeVif = true; } } if (removeVif) { // Determine the correct VIF on DomR to // associate/disassociate the // IP address with final VIF correctVif = getCorrectVif(conn, router, network); if (correctVif != null) { network = correctVif.getNetwork(conn); // Mark this vif to be removed from network usage networkUsage(conn, routerIp, "deleteVif", "eth" + correctVif.getDevice(conn)); // Remove the VIF from DomR correctVif.unplug(conn); correctVif.destroy(conn); // Disable the VLAN network if necessary disableVlanNetwork(conn, network); } } } } catch (final Exception e) { s_logger.debug("Ip Assoc failure on applying one ip due to exception: ", e); return new ExecutionResult(false, e.getMessage()); } return new ExecutionResult(true, null); } public void cleanupTemplateSR(final Connection conn) { Set<PBD> pbds = null; try { final Host host = Host.getByUuid(conn, _host.getUuid()); pbds = host.getPBDs(conn); } catch (final XenAPIException e) { s_logger.warn("Unable to get the SRs " + e.toString(), e); throw new CloudRuntimeException("Unable to get SRs " + e.toString(), e); } catch (final Exception e) { throw new CloudRuntimeException("Unable to get SRs " + e.getMessage(), e); } for (final PBD pbd : pbds) { SR sr = null; SR.Record srRec = null; try { sr = pbd.getSR(conn); srRec = sr.getRecord(conn); } catch (final Exception e) { s_logger.warn("pbd.getSR get Exception due to ", e); continue; } final String type = srRec.type; if (srRec.shared) { continue; } if (SRType.NFS.equals(type) || SRType.ISO.equals(type) && srRec.nameDescription.contains("template")) { try { pbd.unplug(conn); pbd.destroy(conn); sr.forget(conn); } catch (final Exception e) { s_logger.warn("forget SR catch Exception due to ", e); } } } } public void cleanUpTmpDomVif(final Connection conn, final Network nw) throws XenAPIException, XmlRpcException { final Pair<VM, VM.Record> vm = getControlDomain(conn); final VM dom0 = vm.first(); final Set<VIF> dom0Vifs = dom0.getVIFs(conn); for (final VIF v : dom0Vifs) { String vifName = "unknown"; try { final VIF.Record vifr = 
v.getRecord(conn); if (v.getNetwork(conn).getUuid(conn).equals(nw.getUuid(conn))) { if (vifr != null) { final Map<String, String> config = vifr.otherConfig; vifName = config.get("nameLabel"); } s_logger.debug("A VIF in dom0 for the network is found - so destroy the vif"); v.destroy(conn); s_logger.debug("Destroy temp dom0 vif" + vifName + " success"); } } catch (final Exception e) { s_logger.warn("Destroy temp dom0 vif " + vifName + "failed", e); } } } protected VDI cloudVDIcopy(final Connection conn, final VDI vdi, final SR sr, int wait) throws Exception { Task task = null; if (wait == 0) { wait = 2 * 60 * 60; } try { task = vdi.copyAsync(conn, sr); // poll every 1 seconds , timeout after 2 hours waitForTask(conn, task, 1000, (long)wait * 1000); checkForSuccess(conn, task); final VDI dvdi = Types.toVDI(task, conn); return dvdi; } finally { if (task != null) { try { task.destroy(conn); } catch (final Exception e) { s_logger.debug("unable to destroy task(" + task.toString() + ") on host(" + _host.getUuid() + ") due to " + e.toString()); } } } } public HashMap<String, String> clusterVMMetaDataSync(final Connection conn) { final HashMap<String, String> vmMetaDatum = new HashMap<String, String>(); try { final Map<VM, VM.Record> vm_map = VM.getAllRecords(conn); // USE if (vm_map != null) { for (final VM.Record record : vm_map.values()) { if (record.isControlDomain || record.isASnapshot || record.isATemplate) { continue; // Skip DOM0 } final String platform = StringUtils.mapToString(record.platform); if (platform.isEmpty()) { continue; //Skip if platform is null } vmMetaDatum.put(record.nameLabel, StringUtils.mapToString(record.platform)); } } } catch (final Throwable e) { final String msg = "Unable to get vms through host " + _host.getUuid() + " due to to " + e.toString(); s_logger.warn(msg, e); throw new CloudRuntimeException(msg); } return vmMetaDatum; } @Override public boolean configure(final String name, final Map<String, Object> params) throws ConfigurationException { _name = name; try { _dcId = Long.parseLong((String)params.get("zone")); } catch (final NumberFormatException e) { throw new ConfigurationException("Unable to get the zone " + params.get("zone")); } _host.setUuid((String)params.get("guid")); _name = _host.getUuid(); _host.setIp((String)params.get("ipaddress")); _username = (String)params.get("username"); _password.add((String)params.get("password")); _pod = (String)params.get("pod"); _cluster = (String)params.get("cluster"); _privateNetworkName = (String)params.get("private.network.device"); _publicNetworkName = (String)params.get("public.network.device"); _guestNetworkName = (String)params.get("guest.network.device"); _instance = (String)params.get("instance.name"); _securityGroupEnabled = Boolean.parseBoolean((String)params.get("securitygroupenabled")); _linkLocalPrivateNetworkName = (String)params.get("private.linkLocal.device"); if (_linkLocalPrivateNetworkName == null) { _linkLocalPrivateNetworkName = "cloud_link_local_network"; } _storageNetworkName1 = (String)params.get("storage.network.device1"); _storageNetworkName2 = (String)params.get("storage.network.device2"); _heartbeatTimeout = NumbersUtil.parseInt((String)params.get("xenserver.heartbeat.timeout"), 120); _heartbeatInterval = NumbersUtil.parseInt((String)params.get("xenserver.heartbeat.interval"), 60); String value = (String)params.get("wait"); _wait = NumbersUtil.parseInt(value, 600); value = (String)params.get("migratewait"); _migratewait = NumbersUtil.parseInt(value, 3600); _maxNics = 
NumbersUtil.parseInt((String)params.get("xenserver.nics.max"), 7); if (_pod == null) { throw new ConfigurationException("Unable to get the pod"); } if (_host.getIp() == null) { throw new ConfigurationException("Unable to get the host address"); } if (_username == null) { throw new ConfigurationException("Unable to get the username"); } if (_password.peek() == null) { throw new ConfigurationException("Unable to get the password"); } if (_host.getUuid() == null) { throw new ConfigurationException("Unable to get the uuid"); } CheckXenHostInfo(); storageHandler = buildStorageHandler(); _vrResource = new VirtualRoutingResource(this); if (!_vrResource.configure(name, params)) { throw new ConfigurationException("Unable to configure VirtualRoutingResource"); } return true; } /** * This method creates a XenServer network and configures it for being used * as a L2-in-L3 tunneled network */ public synchronized Network configureTunnelNetwork(final Connection conn, final Long networkId, final long hostId, final String bridgeName) { try { final Network nw = findOrCreateTunnelNetwork(conn, bridgeName); // Invoke plugin to setup the bridge which will be used by this // network final String bridge = nw.getBridge(conn); final Map<String, String> nwOtherConfig = nw.getOtherConfig(conn); final String configuredHosts = nwOtherConfig.get("ovs-host-setup"); boolean configured = false; if (configuredHosts != null) { final String hostIdsStr[] = configuredHosts.split(","); for (final String hostIdStr : hostIdsStr) { if (hostIdStr.equals(((Long)hostId).toString())) { configured = true; break; } } } if (!configured) { String result; if (bridgeName.startsWith("OVS-DR-VPC-Bridge")) { result = callHostPlugin(conn, "ovstunnel", "setup_ovs_bridge_for_distributed_routing", "bridge", bridge, "key", bridgeName, "xs_nw_uuid", nw.getUuid(conn), "cs_host_id", ((Long)hostId).toString()); } else { result = callHostPlugin(conn, "ovstunnel", "setup_ovs_bridge", "bridge", bridge, "key", bridgeName, "xs_nw_uuid", nw.getUuid(conn), "cs_host_id", ((Long)hostId).toString()); } // Note down the fact that the ovs bridge has been setup final String[] res = result.split(":"); if (res.length != 2 || !res[0].equalsIgnoreCase("SUCCESS")) { throw new CloudRuntimeException("Unable to pre-configure OVS bridge " + bridge); } } return nw; } catch (final Exception e) { s_logger.warn("createandConfigureTunnelNetwork failed", e); return null; } } public String connect(final Connection conn, final String vmname, final String ipAddress) { return connect(conn, vmname, ipAddress, 3922); } public String connect(final Connection conn, final String vmName, final String ipAddress, final int port) { for (int i = 0; i <= _retry; i++) { try { final Set<VM> vms = VM.getByNameLabel(conn, vmName); if (vms.size() < 1) { final String msg = "VM " + vmName + " is not running"; s_logger.warn(msg); return msg; } } catch (final Exception e) { final String msg = "VM.getByNameLabel " + vmName + " failed due to " + e.toString(); s_logger.warn(msg, e); return msg; } if (s_logger.isDebugEnabled()) { s_logger.debug("Trying to connect to " + ipAddress + " attempt " + i + " of " + _retry); } if (pingdomr(conn, ipAddress, Integer.toString(port))) { return null; } try { Thread.sleep(_sleep); } catch (final InterruptedException e) { } } final String msg = "Timeout, Unable to logon to " + ipAddress; s_logger.debug(msg); return msg; } public String copyVhdFromSecondaryStorage(final Connection conn, final String mountpoint, final String sruuid, final int wait) { final String nameLabel 
= "cloud-" + UUID.randomUUID().toString(); final String results = callHostPluginAsync(conn, "vmopspremium", "copy_vhd_from_secondarystorage", wait, "mountpoint", mountpoint, "sruuid", sruuid, "namelabel", nameLabel); String errMsg = null; if (results == null || results.isEmpty()) { errMsg = "copy_vhd_from_secondarystorage return null"; } else { final String[] tmp = results.split("#"); final String status = tmp[0]; if (status.equals("0")) { return tmp[1]; } else { errMsg = tmp[1]; } } final String source = mountpoint.substring(mountpoint.lastIndexOf('/') + 1); if (killCopyProcess(conn, source)) { destroyVDIbyNameLabel(conn, nameLabel); } s_logger.warn(errMsg); throw new CloudRuntimeException(errMsg); } @Override public ExecutionResult createFileInVR(final String routerIp, final String path, final String filename, final String content) { final Connection conn = getConnection(); final String hostPath = "/tmp/"; s_logger.debug("Copying VR with ip " + routerIp + " config file into host " + _host.getIp()); try { SshHelper.scpTo(_host.getIp(), 22, _username, null, _password.peek(), hostPath, content.getBytes(Charset.defaultCharset()), filename, null); } catch (final Exception e) { s_logger.warn("scp VR config file into host " + _host.getIp() + " failed with exception " + e.getMessage().toString()); } final String rc = callHostPlugin(conn, "vmops", "createFileInDomr", "domrip", routerIp, "srcfilepath", hostPath + filename, "dstfilepath", path); s_logger.debug("VR Config file " + filename + " got created in VR, ip " + routerIp + " with content \n" + content); return new ExecutionResult(rc.startsWith("succ#"), rc.substring(5)); } protected SR createIsoSRbyURI(final Connection conn, final URI uri, final String vmName, final boolean shared) { try { final Map<String, String> deviceConfig = new HashMap<String, String>(); String path = uri.getPath(); path = path.replace("//", "/"); deviceConfig.put("location", uri.getHost() + ":" + path); final Host host = Host.getByUuid(conn, _host.getUuid()); final SR sr = SR.create(conn, host, deviceConfig, new Long(0), uri.getHost() + path, "iso", "iso", "iso", shared, new HashMap<String, String>()); sr.setNameLabel(conn, vmName + "-ISO"); sr.setNameDescription(conn, deviceConfig.get("location")); sr.scan(conn); return sr; } catch (final XenAPIException e) { final String msg = "createIsoSRbyURI failed! mountpoint: " + uri.getHost() + uri.getPath() + " due to " + e.toString(); s_logger.warn(msg, e); throw new CloudRuntimeException(msg, e); } catch (final Exception e) { final String msg = "createIsoSRbyURI failed! mountpoint: " + uri.getHost() + uri.getPath() + " due to " + e.getMessage(); s_logger.warn(msg, e); throw new CloudRuntimeException(msg, e); } } protected SR createNfsSRbyURI(final Connection conn, final URI uri, final boolean shared) { try { if (s_logger.isDebugEnabled()) { s_logger.debug("Creating a " + (shared ? 
"shared SR for " : "not shared SR for ") + uri); } final Map<String, String> deviceConfig = new HashMap<String, String>(); String path = uri.getPath(); path = path.replace("//", "/"); deviceConfig.put("server", uri.getHost()); deviceConfig.put("serverpath", path); final String name = UUID.nameUUIDFromBytes(new String(uri.getHost() + path).getBytes()).toString(); if (!shared) { final Set<SR> srs = SR.getByNameLabel(conn, name); for (final SR sr : srs) { final SR.Record record = sr.getRecord(conn); if (SRType.NFS.equals(record.type) && record.contentType.equals("user") && !record.shared) { removeSRSync(conn, sr); } } } final Host host = Host.getByUuid(conn, _host.getUuid()); final Map<String, String> smConfig = new HashMap<String, String>(); smConfig.put("nosubdir", "true"); final SR sr = SR.create(conn, host, deviceConfig, new Long(0), name, uri.getHost() + uri.getPath(), SRType.NFS.toString(), "user", shared, smConfig); if (!checkSR(conn, sr)) { throw new Exception("no attached PBD"); } if (s_logger.isDebugEnabled()) { s_logger.debug(logX(sr, "Created a SR; UUID is " + sr.getUuid(conn) + " device config is " + deviceConfig)); } sr.scan(conn); return sr; } catch (final XenAPIException e) { final String msg = "Can not create second storage SR mountpoint: " + uri.getHost() + uri.getPath() + " due to " + e.toString(); s_logger.warn(msg, e); throw new CloudRuntimeException(msg, e); } catch (final Exception e) { final String msg = "Can not create second storage SR mountpoint: " + uri.getHost() + uri.getPath() + " due to " + e.getMessage(); s_logger.warn(msg, e); throw new CloudRuntimeException(msg, e); } } public VBD createPatchVbd(final Connection conn, final String vmName, final VM vm) throws XmlRpcException, XenAPIException { if (_host.getSystemvmisouuid() == null) { Set<SR> srs = SR.getByNameLabel(conn, "XenServer Tools"); if (srs.size() != 1) { s_logger.debug("Failed to find SR by name 'XenServer Tools', will try to find 'XCP-ng Tools' SR"); srs = SR.getByNameLabel(conn, "XCP-ng Tools"); if (srs.size() != 1) { throw new CloudRuntimeException("There are " + srs.size() + " SRs with name XenServer Tools"); } } final SR sr = srs.iterator().next(); sr.scan(conn); final SR.Record srr = sr.getRecord(conn); if (_host.getSystemvmisouuid() == null) { for (final VDI vdi : srr.VDIs) { final VDI.Record vdir = vdi.getRecord(conn); if (vdir.nameLabel.contains("systemvm.iso")) { _host.setSystemvmisouuid(vdir.uuid); break; } } } if (_host.getSystemvmisouuid() == null) { throw new CloudRuntimeException("can not find systemvmiso"); } } final VBD.Record cdromVBDR = new VBD.Record(); cdromVBDR.VM = vm; cdromVBDR.empty = true; cdromVBDR.bootable = false; cdromVBDR.userdevice = "3"; cdromVBDR.mode = Types.VbdMode.RO; cdromVBDR.type = Types.VbdType.CD; final VBD cdromVBD = VBD.create(conn, cdromVBDR); cdromVBD.insert(conn, VDI.getByUuid(conn, _host.getSystemvmisouuid())); return cdromVBD; } protected boolean createSecondaryStorageFolder(final Connection conn, final String remoteMountPath, final String newFolder) { final String result = callHostPlugin(conn, "vmopsSnapshot", "create_secondary_storage_folder", "remoteMountPath", remoteMountPath, "newFolder", newFolder); return result != null; } String createTemplateFromSnapshot(final Connection conn, final String templatePath, final String snapshotPath, final int wait) { final String tmpltLocalDir = UUID.randomUUID().toString(); final String results = callHostPluginAsync(conn, "vmopspremium", "create_privatetemplate_from_snapshot", wait, "templatePath", templatePath, 
"snapshotPath", snapshotPath, "tmpltLocalDir", tmpltLocalDir); String errMsg = null; if (results == null || results.isEmpty()) { errMsg = "create_privatetemplate_from_snapshot return null"; } else { final String[] tmp = results.split("#"); final String status = tmp[0]; if (status.equals("0")) { return results; } else { errMsg = "create_privatetemplate_from_snapshot failed due to " + tmp[1]; } } final String source = "cloud_mount/" + tmpltLocalDir; killCopyProcess(conn, source); s_logger.warn(errMsg); throw new CloudRuntimeException(errMsg); } public VBD createVbd(final Connection conn, final DiskTO volume, final String vmName, final VM vm, final BootloaderType bootLoaderType, VDI vdi) throws XmlRpcException, XenAPIException { final Volume.Type type = volume.getType(); if (vdi == null) { vdi = mount(conn, vmName, volume); } if (vdi != null) { if ("detached".equals(vdi.getNameLabel(conn))) { vdi.setNameLabel(conn, vmName + "-DATA"); } final Map<String, String> smConfig = vdi.getSmConfig(conn); for (final String key : smConfig.keySet()) { if (key.startsWith("host_")) { vdi.removeFromSmConfig(conn, key); break; } } } final VBD.Record vbdr = new VBD.Record(); vbdr.VM = vm; if (vdi != null) { vbdr.VDI = vdi; } else { vbdr.empty = true; } if (type == Volume.Type.ROOT && bootLoaderType == BootloaderType.PyGrub) { vbdr.bootable = true; } else if (type == Volume.Type.ISO && bootLoaderType == BootloaderType.CD) { vbdr.bootable = true; } if (volume.getType() == Volume.Type.ISO) { vbdr.mode = Types.VbdMode.RO; vbdr.type = Types.VbdType.CD; vbdr.userdevice = "3"; } else { vbdr.mode = Types.VbdMode.RW; vbdr.type = Types.VbdType.DISK; vbdr.unpluggable = (volume.getType() == Volume.Type.ROOT) ? false : true; vbdr.userdevice = "autodetect"; final Long deviceId = volume.getDiskSeq(); if (deviceId != null && (!isDeviceUsed(conn, vm, deviceId) || deviceId > 3)) { vbdr.userdevice = deviceId.toString(); } } final VBD vbd = VBD.create(conn, vbdr); if (s_logger.isDebugEnabled()) { s_logger.debug("VBD " + vbd.getUuid(conn) + " created for " + volume); } return vbd; } public VDI createVdi(final SR sr, final String vdiNameLabel, final Long volumeSize) throws Types.XenAPIException, XmlRpcException { final Connection conn = getConnection(); final VDI.Record vdir = new VDI.Record(); vdir.nameLabel = vdiNameLabel; vdir.SR = sr; vdir.type = Types.VdiType.USER; final long totalSrSpace = sr.getPhysicalSize(conn); final long unavailableSrSpace = sr.getPhysicalUtilisation(conn); final long availableSrSpace = totalSrSpace - unavailableSrSpace; if (availableSrSpace < volumeSize) { throw new CloudRuntimeException("Available space for SR cannot be less than " + volumeSize + "."); } vdir.virtualSize = volumeSize; return VDI.create(conn, vdir); } public void createVGPU(final Connection conn, final StartCommand cmd, final VM vm, final GPUDeviceTO gpuDevice) throws XenAPIException, XmlRpcException { } public VIF createVif(final Connection conn, final String vmName, final VM vm, final VirtualMachineTO vmSpec, final NicTO nic) throws XmlRpcException, XenAPIException { assert nic.getUuid() != null : "Nic should have a uuid value"; if (s_logger.isDebugEnabled()) { s_logger.debug("Creating VIF for " + vmName + " on nic " + nic); } VIF.Record vifr = new VIF.Record(); vifr.VM = vm; vifr.device = Integer.toString(nic.getDeviceId()); vifr.MAC = nic.getMac(); // Nicira needs these IDs to find the NIC vifr.otherConfig = new HashMap<String, String>(); vifr.otherConfig.put("nicira-iface-id", nic.getUuid()); vifr.otherConfig.put("nicira-vm-id", 
vm.getUuid(conn)); // Provide XAPI with the cloudstack vm and nic uids. vifr.otherConfig.put("cloudstack-nic-id", nic.getUuid()); if (vmSpec != null) { vifr.otherConfig.put("cloudstack-vm-id", vmSpec.getUuid()); } // OVS plugin looks at network UUID in the vif 'otherconfig' details to // group VIF's & tunnel ports as part of tier // when bridge is setup for distributed routing vifr.otherConfig.put("cloudstack-network-id", nic.getNetworkUuid()); vifr.network = getNetwork(conn, nic); if (nic.getNetworkRateMbps() != null && nic.getNetworkRateMbps().intValue() != -1) { vifr.qosAlgorithmType = "ratelimit"; vifr.qosAlgorithmParams = new HashMap<String, String>(); // convert mbs to kilobyte per second vifr.qosAlgorithmParams.put("kbps", Integer.toString(nic.getNetworkRateMbps() * 128)); } vifr.lockingMode = Types.VifLockingMode.NETWORK_DEFAULT; final VIF vif = VIF.create(conn, vifr); if (s_logger.isDebugEnabled()) { vifr = vif.getRecord(conn); if (vifr != null) { s_logger.debug("Created a vif " + vifr.uuid + " on " + nic.getDeviceId()); } } return vif; } public VM createVmFromTemplate(final Connection conn, final VirtualMachineTO vmSpec, final Host host) throws XenAPIException, XmlRpcException { final String guestOsTypeName = getGuestOsType(vmSpec.getPlatformEmulator()); final Set<VM> templates = VM.getByNameLabel(conn, guestOsTypeName); if (templates == null || templates.isEmpty()) { throw new CloudRuntimeException("Cannot find template " + guestOsTypeName + " on XenServer host"); } assert templates.size() == 1 : "Should only have 1 template but found " + templates.size(); final VM template = templates.iterator().next(); final VM.Record vmr = template.getRecord(conn); vmr.affinity = host; vmr.otherConfig.remove("disks"); vmr.otherConfig.remove("default_template"); vmr.otherConfig.remove("mac_seed"); vmr.isATemplate = false; vmr.nameLabel = vmSpec.getName(); vmr.actionsAfterCrash = Types.OnCrashBehaviour.DESTROY; vmr.actionsAfterShutdown = Types.OnNormalExit.DESTROY; vmr.otherConfig.put("vm_uuid", vmSpec.getUuid()); vmr.VCPUsMax = (long)vmSpec.getCpus(); // FIX ME: In case of dynamic // scaling this VCPU max should // be the minumum of // recommended value for that template and capacity remaining on host long recommendedMemoryMin = 0l; long recommendedMemoryMax = 0l; Map<String, String> guestOsDetails = vmSpec.getGuestOsDetails(); if (guestOsDetails != null) { if (guestOsDetails.containsKey("xenserver.dynamicMin")) { recommendedMemoryMin = Long.valueOf(guestOsDetails.get("xenserver.dynamicMin")).longValue(); } if (guestOsDetails.containsKey("xenserver.dynamicMax")) { recommendedMemoryMax = Long.valueOf(guestOsDetails.get("xenserver.dynamicMax")).longValue(); } } if (isDmcEnabled(conn, host) && vmSpec.isEnableDynamicallyScaleVm()) { // scaling is allowed vmr.memoryStaticMin = getStaticMin(vmSpec.getOs(), vmSpec.getBootloader() == BootloaderType.CD, vmSpec.getMinRam(), vmSpec.getMaxRam(), recommendedMemoryMin); vmr.memoryStaticMax = getStaticMax(vmSpec.getOs(), vmSpec.getBootloader() == BootloaderType.CD, vmSpec.getMinRam(), vmSpec.getMaxRam(), recommendedMemoryMax); vmr.memoryDynamicMin = vmSpec.getMinRam(); vmr.memoryDynamicMax = vmSpec.getMaxRam(); if (guestOsTypeName.toLowerCase().contains("windows")) { vmr.VCPUsMax = (long)vmSpec.getCpus(); } else { if (vmSpec.getVcpuMaxLimit() != null) { vmr.VCPUsMax = (long)vmSpec.getVcpuMaxLimit(); } } } else { // scaling disallowed, set static memory target if (vmSpec.isEnableDynamicallyScaleVm() && !isDmcEnabled(conn, host)) { s_logger.warn("Host " + 
host.getHostname(conn) + " does not support dynamic scaling, so the vm " + vmSpec.getName() + " is not dynamically scalable"); } vmr.memoryStaticMin = vmSpec.getMinRam(); vmr.memoryStaticMax = vmSpec.getMaxRam(); vmr.memoryDynamicMin = vmSpec.getMinRam(); vmr.memoryDynamicMax = vmSpec.getMaxRam(); vmr.VCPUsMax = (long)vmSpec.getCpus(); } vmr.VCPUsAtStartup = (long)vmSpec.getCpus(); vmr.consoles.clear(); vmr.xenstoreData.clear(); //Add xenstore data for the NetscalerVM if (vmSpec.getType() == VirtualMachine.Type.NetScalerVm) { NicTO mgmtNic = vmSpec.getNics()[0]; if (mgmtNic != null) { Map<String, String> xenstoreData = new HashMap<String, String>(3); xenstoreData.put(XENSTORE_DATA_IP, mgmtNic.getIp().toString().trim()); xenstoreData.put(XENSTORE_DATA_GATEWAY, mgmtNic.getGateway().toString().trim()); xenstoreData.put(XENSTORE_DATA_NETMASK, mgmtNic.getNetmask().toString().trim()); vmr.xenstoreData = xenstoreData; } } final VM vm = VM.create(conn, vmr); s_logger.debug("Created VM " + vm.getUuid(conn) + " for " + vmSpec.getName()); final Map<String, String> vcpuParams = new HashMap<String, String>(); final Integer speed = vmSpec.getMinSpeed(); if (speed != null) { int cpuWeight = _maxWeight; // cpu_weight int utilization = 0; // max CPU cap, default is unlimited // weight based allocation, CPU weight is calculated per VCPU cpuWeight = (int)(speed * 0.99 / _host.getSpeed() * _maxWeight); if (cpuWeight > _maxWeight) { cpuWeight = _maxWeight; } if (vmSpec.getLimitCpuUse()) { // CPU cap is per VM, so need to assign cap based on the number // of vcpus utilization = (int)(vmSpec.getMaxSpeed() * 0.99 * vmSpec.getCpus() / _host.getSpeed() * 100); } vcpuParams.put("weight", Integer.toString(cpuWeight)); vcpuParams.put("cap", Integer.toString(utilization)); } if (vcpuParams.size() > 0) { vm.setVCPUsParams(conn, vcpuParams); } final String bootArgs = vmSpec.getBootArgs(); if (bootArgs != null && bootArgs.length() > 0) { // send boot args for PV instances String pvargs = vm.getPVArgs(conn); pvargs = pvargs + vmSpec.getBootArgs().replaceAll(" ", "%"); vm.setPVArgs(conn, pvargs); s_logger.debug("PV args are " + pvargs); // send boot args into xenstore-data for HVM instances Map<String, String> xenstoreData = new HashMap<>(); xenstoreData.put(XENSTORE_DATA_CS_INIT, bootArgs); vm.setXenstoreData(conn, xenstoreData); s_logger.debug("HVM args are " + bootArgs); } if (!(guestOsTypeName.startsWith("Windows") || guestOsTypeName.startsWith("Citrix") || guestOsTypeName.startsWith("Other"))) { if (vmSpec.getBootloader() == BootloaderType.CD) { final DiskTO[] disks = vmSpec.getDisks(); for (final DiskTO disk : disks) { if (disk.getType() == Volume.Type.ISO) { final TemplateObjectTO iso = (TemplateObjectTO)disk.getData(); final String osType = iso.getGuestOsType(); if (osType != null) { final String isoGuestOsName = getGuestOsType(vmSpec.getPlatformEmulator()); if (!isoGuestOsName.equals(guestOsTypeName)) { vmSpec.setBootloader(BootloaderType.PyGrub); } } } } } if (vmSpec.getBootloader() == BootloaderType.CD) { vm.setPVBootloader(conn, "eliloader"); if (!vm.getOtherConfig(conn).containsKey("install-repository")) { vm.addToOtherConfig(conn, "install-repository", "cdrom"); } } else if (vmSpec.getBootloader() == BootloaderType.PyGrub) { vm.setPVBootloader(conn, "pygrub"); vm.setPVBootloaderArgs(conn, CitrixHelper.getPVbootloaderArgs(guestOsTypeName)); } else { vm.destroy(conn); throw new CloudRuntimeException("Unable to handle boot loader type: " + vmSpec.getBootloader()); } } try { finalizeVmMetaData(vm, conn, vmSpec); } 
catch (final Exception e) { throw new CloudRuntimeException("Unable to finalize VM MetaData: " + vmSpec); } return vm; } public VM createWorkingVM(final Connection conn, final String vmName, final String guestOSType, final String platformEmulator, final List<VolumeObjectTO> listVolumeTo) throws BadServerResponse, Types.VmBadPowerState, Types.SrFull, Types.OperationNotAllowed, XenAPIException, XmlRpcException { // below is redundant but keeping for consistency and code readabilty final String guestOsTypeName = platformEmulator; if (guestOsTypeName == null) { final String msg = " Hypervisor " + this.getClass().getName() + " doesn't support guest OS type " + guestOSType + ". you can choose 'Other install media' to run it as HVM"; s_logger.warn(msg); throw new CloudRuntimeException(msg); } final VM template = getVM(conn, guestOsTypeName); final VM vm = template.createClone(conn, vmName); vm.setIsATemplate(conn, false); final Map<VDI, VolumeObjectTO> vdiMap = new HashMap<VDI, VolumeObjectTO>(); for (final VolumeObjectTO volume : listVolumeTo) { final String vdiUuid = volume.getPath(); try { final VDI vdi = VDI.getByUuid(conn, vdiUuid); vdiMap.put(vdi, volume); } catch (final Types.UuidInvalid e) { s_logger.warn("Unable to find vdi by uuid: " + vdiUuid + ", skip it"); } } for (final Map.Entry<VDI, VolumeObjectTO> entry : vdiMap.entrySet()) { final VDI vdi = entry.getKey(); final VolumeObjectTO volumeTO = entry.getValue(); final VBD.Record vbdr = new VBD.Record(); vbdr.VM = vm; vbdr.VDI = vdi; if (volumeTO.getVolumeType() == Volume.Type.ROOT) { vbdr.bootable = true; vbdr.unpluggable = false; } else { vbdr.bootable = false; vbdr.unpluggable = true; } vbdr.userdevice = "autodetect"; vbdr.mode = Types.VbdMode.RW; vbdr.type = Types.VbdType.DISK; Long deviceId = volumeTO.getDeviceId(); if (deviceId != null && (!isDeviceUsed(conn, vm, deviceId) || deviceId > 3)) { vbdr.userdevice = deviceId.toString(); } VBD.create(conn, vbdr); } return vm; } protected boolean deleteSecondaryStorageFolder(final Connection conn, final String remoteMountPath, final String folder) { final String details = callHostPlugin(conn, "vmopsSnapshot", "delete_secondary_storage_folder", "remoteMountPath", remoteMountPath, "folder", folder); return details != null && details.equals("1"); } protected String deleteSnapshotBackup(final Connection conn, final Long dcId, final Long accountId, final Long volumeId, final String secondaryStorageMountPath, final String backupUUID) { // If anybody modifies the formatting below again, I'll skin them final String result = callHostPlugin(conn, "vmopsSnapshot", "deleteSnapshotBackup", "backupUUID", backupUUID, "dcId", dcId.toString(), "accountId", accountId.toString(), "volumeId", volumeId.toString(), "secondaryStorageMountPath", secondaryStorageMountPath); return result; } public void destroyPatchVbd(final Connection conn, final String vmName) throws XmlRpcException, XenAPIException { try { if (!vmName.startsWith("r-") && !vmName.startsWith("s-") && !vmName.startsWith("v-")) { return; } final Set<VM> vms = VM.getByNameLabel(conn, vmName); for (final VM vm : vms) { final Set<VBD> vbds = vm.getVBDs(conn); for (final VBD vbd : vbds) { if (vbd.getType(conn) == Types.VbdType.CD) { vbd.eject(conn); vbd.destroy(conn); break; } } } } catch (final Exception e) { s_logger.debug("Cannot destory CD-ROM device for VM " + vmName + " due to " + e.toString(), e); } } public synchronized void destroyTunnelNetwork(final Connection conn, final Network nw, final long hostId) { try { final String bridge = 
nw.getBridge(conn); final String result = callHostPlugin(conn, "ovstunnel", "destroy_ovs_bridge", "bridge", bridge, "cs_host_id", ((Long)hostId).toString()); final String[] res = result.split(":"); if (res.length != 2 || !res[0].equalsIgnoreCase("SUCCESS")) { throw new CloudRuntimeException("Unable to remove OVS bridge " + bridge + ":" + result); } return; } catch (final Exception e) { s_logger.warn("destroyTunnelNetwork failed:", e); return; } } void destroyVDIbyNameLabel(final Connection conn, final String nameLabel) { try { final Set<VDI> vdis = VDI.getByNameLabel(conn, nameLabel); if (vdis.size() != 1) { s_logger.warn("destoryVDIbyNameLabel failed due to there are " + vdis.size() + " VDIs with name " + nameLabel); return; } for (final VDI vdi : vdis) { try { vdi.destroy(conn); } catch (final Exception e) { final String msg = "Failed to destroy VDI : " + nameLabel + "due to " + e.toString() + "\n Force deleting VDI using system 'rm' command"; s_logger.warn(msg); try { final String srUUID = vdi.getSR(conn).getUuid(conn); final String vdiUUID = vdi.getUuid(conn); final String vdifile = "/var/run/sr-mount/" + srUUID + "/" + vdiUUID + ".vhd"; callHostPluginAsync(conn, "vmopspremium", "remove_corrupt_vdi", 10, "vdifile", vdifile); } catch (final Exception e2) { s_logger.warn(e2); } } } } catch (final Exception e) { } } public void disableVlanNetwork(final Connection conn, final Network network) { } @Override public void disconnected() { } public boolean doPingTest(final Connection conn, final String computingHostIp) { final com.trilead.ssh2.Connection sshConnection = new com.trilead.ssh2.Connection(_host.getIp(), 22); try { sshConnection.connect(null, 60000, 60000); if (!sshConnection.authenticateWithPassword(_username, _password.peek())) { throw new CloudRuntimeException("Unable to authenticate"); } final String cmd = "ping -c 2 " + computingHostIp; if (!SSHCmdHelper.sshExecuteCmd(sshConnection, cmd)) { throw new CloudRuntimeException("Cannot ping host " + computingHostIp + " from host " + _host.getIp()); } return true; } catch (final Exception e) { s_logger.warn("Catch exception " + e.toString(), e); return false; } finally { sshConnection.close(); } } public boolean doPingTest(final Connection conn, final String domRIp, final String vmIp) { final String args = "-i " + domRIp + " -p " + vmIp; final String result = callHostPlugin(conn, "vmops", "pingtest", "args", args); if (result == null || result.isEmpty()) { return false; } return true; } /** * enableVlanNetwork creates a Network object, Vlan object, and thereby a * tagged PIF object in Xapi. * * In XenServer, VLAN is added by - Create a network, which is unique * cluster wide. - Find the PIF that you want to create the VLAN on. - * Create a VLAN using the network and the PIF. As a result of this * operation, a tagged PIF object is also created. * * Here is a list of problems with clustered Xapi implementation that we are * trying to circumvent. - There can be multiple Networks with the same * name-label so searching using name-label is not unique. - There are no * other ways to search for Networks other than listing all of them which is * not efficient in our implementation because we can have over 4000 VLAN * networks. - In a clustered situation, it's possible for both hosts to * detect that the Network is missing and both creates it. This causes a lot * of problems as one host may be using one Network and another may be using * a different network for their VMs. 
This causes problems in migration
 * because the VMs are logically attached to different networks in Xapi's
 * database but in reality, they are attached to the same network.
 *
 * To work around these problems, we do the following.
 *
 * - When creating the VLAN network, we name it as VLAN-UUID of the Network
 * it is created on-VLAN Tag. Because a VLAN tag is unique within one
 * particular network, this is a unique name-label that lets us quickly
 * retrieve the VLAN network when we need it again. - When we create the VLAN
 * network, we add a timestamp and a random number as a tag on the
 * network. Then, instead of creating the VLAN on that network, we
 * retrieve the Network again and this time use the VLAN network with the
 * lowest timestamp or lowest random number as the VLAN network. This allows
 * VLAN creation to happen on multiple hosts concurrently, but even if two
 * VLAN networks were created with the same name, only one of them is used.
 *
 * One caveat about this approach is that it relies on the timestamps being
 * relatively accurate across the different hosts.
 *
 * @param conn
 *            Xapi Connection
 * @param tag
 *            VLAN tag
 * @param network
 *            network on this host to create the VLAN on.
 * @return VLAN Network created.
 * @throws XenAPIException
 * @throws XmlRpcException
 */
protected Network enableVlanNetwork(final Connection conn, final long tag, final XsLocalNetwork network) throws XenAPIException, XmlRpcException {
    Network vlanNetwork = null;
    final String oldName = "VLAN" + Long.toString(tag);
    final String newName = "VLAN-" + network.getNetworkRecord(conn).uuid + "-" + tag;
    XsLocalNetwork vlanNic = getNetworkByName(conn, newName);
    if (vlanNic == null) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Couldn't find vlan network with the new name so trying old name: " + oldName);
        }
        vlanNic = getNetworkByName(conn, oldName);
        if (vlanNic != null) {
            s_logger.info("Renaming VLAN with old name " + oldName + " to " + newName);
            vlanNic.getNetwork().setNameLabel(conn, newName);
        }
    }
    if (vlanNic == null) { // Can't find it, then create it.
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Creating VLAN network for " + tag + " on host " + _host.getIp());
        }
        final Network.Record nwr = new Network.Record();
        nwr.nameLabel = newName;
        nwr.tags = new HashSet<String>();
        nwr.tags.add(generateTimeStamp());
        vlanNetwork = Network.create(conn, nwr);
        vlanNic = getNetworkByName(conn, newName);
        if (vlanNic == null) {
            // If vlanNic is still null, we could not create it for some
            // reason and no exception was thrown.
            throw new CloudRuntimeException("Could not find/create vlan network with name: " + newName);
        }
    }
    final PIF nPif = network.getPif(conn);
    final PIF.Record nPifr = network.getPifRecord(conn);
    vlanNetwork = vlanNic.getNetwork();
    if (vlanNic.getPif(conn) != null) {
        return vlanNetwork;
    }
    if (s_logger.isDebugEnabled()) {
        s_logger.debug("Creating VLAN " + tag + " on host " + _host.getIp() + " on device " + nPifr.device);
    }
    final VLAN vlan = VLAN.create(conn, nPif, tag, vlanNetwork);
    if (vlan != null) {
        final VLAN.Record vlanr = vlan.getRecord(conn);
        if (vlanr != null) {
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("VLAN is created for " + tag + ". The uuid is " + vlanr.uuid);
            }
        }
    }
    return vlanNetwork;
}
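// Illustrative sketch only (hypothetical helper, not part of the original class): the naming
// convention described in the javadoc above produces labels of the form
// "VLAN-<network-uuid>-<vlan-tag>". enableVlanNetwork() builds that string inline; a helper
// composing the same label could look like this.
private static String composeVlanNetworkName(final String networkUuid, final long vlanTag) {
    // e.g. composeVlanNetworkName("0e7bbf1d-6c2e-4ab1-9d7a-3f2b8c11aa55", 204)
    // returns "VLAN-0e7bbf1d-6c2e-4ab1-9d7a-3f2b8c11aa55-204" (example UUID is made up)
    return "VLAN-" + networkUuid + "-" + vlanTag;
}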
@Override
public RebootAnswer execute(final RebootCommand cmd) {
    throw new CloudRuntimeException("The method has been replaced but the implementation is in CitrixRebootCommandWrapper. "
            + "Please use the new design in order to keep compatibility. Once all ServerResource implementations are refactored, these methods will disappear.");
}

@Override
public StartAnswer execute(final StartCommand cmd) {
    throw new CloudRuntimeException("The method has been replaced but the implementation is in CitrixStartCommandWrapper. "
            + "Please use the new design in order to keep compatibility. Once all ServerResource implementations are refactored, these methods will disappear.");
}

@Override
public StopAnswer execute(final StopCommand cmd) {
    throw new CloudRuntimeException("The method has been replaced but the implementation is in CitrixStopCommandWrapper. "
            + "Please use the new design in order to keep compatibility. Once all ServerResource implementations are refactored, these methods will disappear.");
}

@Override
public ExecutionResult executeInVR(final String routerIP, final String script, final String args) {
    // Timeout is 120 seconds by default
    return executeInVR(routerIP, script, args, VRScripts.VR_SCRIPT_EXEC_TIMEOUT);
}

@Override
public ExecutionResult executeInVR(final String routerIP, final String script, final String args, final Duration timeout) {
    Pair<Boolean, String> result;
    String cmdline = "/opt/cloud/bin/router_proxy.sh " + script + " " + routerIP + " " + args;
    // semicolons need to be escaped for bash
    cmdline = cmdline.replaceAll(";", "\\\\;");
    try {
        s_logger.debug("Executing command in VR: " + cmdline);
        result = SshHelper.sshExecute(_host.getIp(), 22, _username, null, _password.peek(), cmdline, VRScripts.CONNECTION_TIMEOUT, VRScripts.CONNECTION_TIMEOUT, timeout);
    } catch (final Exception e) {
        return new ExecutionResult(false, e.getMessage());
    }
    return new ExecutionResult(result.first(), result.second());
}

@Override
public Answer executeRequest(final Command cmd) {
    final CitrixRequestWrapper wrapper = CitrixRequestWrapper.getInstance();
    try {
        return wrapper.execute(cmd, this);
    } catch (final Exception e) {
        return Answer.createUnsupportedCommandAnswer(cmd);
    }
}

protected void fillHostInfo(final Connection conn, final StartupRoutingCommand cmd) {
    final StringBuilder caps = new StringBuilder();
    try {
        final Host host = Host.getByUuid(conn, _host.getUuid());
        final Host.Record hr = host.getRecord(conn);
        Map<String, String> details = cmd.getHostDetails();
        if (details == null) {
            details = new HashMap<String, String>();
        }
        String productBrand = hr.softwareVersion.get("product_brand");
        if (productBrand == null) {
            productBrand = hr.softwareVersion.get("platform_name");
        }
        details.put("product_brand", productBrand);
        details.put("product_version", _host.getProductVersion());
        if (hr.softwareVersion.get("product_version_text_short") != null) {
            details.put("product_version_text_short", hr.softwareVersion.get("product_version_text_short"));
            cmd.setHypervisorVersion(hr.softwareVersion.get("product_version_text_short"));
            cmd.setHypervisorVersion(_host.getProductVersion());
        }
        if (_privateNetworkName != null) {
            details.put("private.network.device", _privateNetworkName);
        }
        cmd.setHostDetails(details);
        cmd.setName(hr.nameLabel);
        cmd.setGuid(_host.getUuid());
        cmd.setPool(_host.getPool());
        cmd.setDataCenter(Long.toString(_dcId));
        for (final String cap : hr.capabilities) {
            if (cap.length() > 0) {
                caps.append(cap).append(" , ");
            }
        }
        if (caps.length() > 0) {
            caps.delete(caps.length() - 3, caps.length());
        }
        cmd.setCaps(caps.toString());
        cmd.setSpeed(_host.getSpeed());
        cmd.setCpuSockets(_host.getCpuSockets());
        cmd.setCpus(_host.getCpus());
        final HostMetrics hm = host.getMetrics(conn);
        long ram = 0;
        long dom0Ram = 0;
        ram = hm.getMemoryTotal(conn);
        final Set<VM> vms =
host.getResidentVMs(conn); for (final VM vm : vms) { if (vm.getIsControlDomain(conn)) { dom0Ram = vm.getMemoryStaticMax(conn); break; } } ram = (long)((ram - dom0Ram - _xsMemoryUsed) * _xsVirtualizationFactor); cmd.setMemory(ram); cmd.setDom0MinMemory(dom0Ram); if (s_logger.isDebugEnabled()) { s_logger.debug("Total Ram: " + ram + " dom0 Ram: " + dom0Ram); } PIF pif = PIF.getByUuid(conn, _host.getPrivatePif()); PIF.Record pifr = pif.getRecord(conn); if (pifr.IP != null && pifr.IP.length() > 0) { cmd.setPrivateIpAddress(pifr.IP); cmd.setPrivateMacAddress(pifr.MAC); cmd.setPrivateNetmask(pifr.netmask); } else { cmd.setPrivateIpAddress(_host.getIp()); cmd.setPrivateMacAddress(pifr.MAC); cmd.setPrivateNetmask("255.255.255.0"); } pif = PIF.getByUuid(conn, _host.getPublicPif()); pifr = pif.getRecord(conn); if (pifr.IP != null && pifr.IP.length() > 0) { cmd.setPublicIpAddress(pifr.IP); cmd.setPublicMacAddress(pifr.MAC); cmd.setPublicNetmask(pifr.netmask); } if (_host.getStoragePif1() != null) { pif = PIF.getByUuid(conn, _host.getStoragePif1()); pifr = pif.getRecord(conn); if (pifr.IP != null && pifr.IP.length() > 0) { cmd.setStorageIpAddress(pifr.IP); cmd.setStorageMacAddress(pifr.MAC); cmd.setStorageNetmask(pifr.netmask); } } if (_host.getStoragePif2() != null) { pif = PIF.getByUuid(conn, _host.getStoragePif2()); pifr = pif.getRecord(conn); if (pifr.IP != null && pifr.IP.length() > 0) { cmd.setStorageIpAddressDeux(pifr.IP); cmd.setStorageMacAddressDeux(pifr.MAC); cmd.setStorageNetmaskDeux(pifr.netmask); } } final Map<String, String> configs = hr.otherConfig; cmd.setIqn(configs.get("iscsi_iqn")); cmd.setPod(_pod); cmd.setVersion(CitrixResourceBase.class.getPackage().getImplementationVersion()); try { final String cmdLine = "xe sm-list | grep \"resigning of duplicates\""; final XenServerUtilitiesHelper xenServerUtilitiesHelper = getXenServerUtilitiesHelper(); Pair<Boolean, String> result = xenServerUtilitiesHelper.executeSshWrapper(_host.getIp(), 22, _username, null, getPwdFromQueue(), cmdLine); boolean supportsClonedVolumes = result != null && result.first() != null && result.first() && result.second() != null && result.second().length() > 0; cmd.setSupportsClonedVolumes(supportsClonedVolumes); } catch (NumberFormatException ex) { s_logger.warn("Issue sending 'xe sm-list' via SSH to XenServer host: " + ex.getMessage()); } } catch (final XmlRpcException e) { throw new CloudRuntimeException("XML RPC Exception: " + e.getMessage(), e); } catch (final XenAPIException e) { throw new CloudRuntimeException("XenAPIException: " + e.toString(), e); } catch (final Exception e) { throw new CloudRuntimeException("Exception: " + e.toString(), e); } } protected void finalizeVmMetaData(final VM vm, final Connection conn, final VirtualMachineTO vmSpec) throws Exception { final Map<String, String> details = vmSpec.getDetails(); if (details != null) { final String platformstring = details.get("platform"); if (platformstring != null && !platformstring.isEmpty()) { final Map<String, String> platform = StringUtils.stringToMap(platformstring); vm.setPlatform(conn, platform); } else { final String timeoffset = details.get("timeoffset"); if (timeoffset != null) { final Map<String, String> platform = vm.getPlatform(conn); platform.put("timeoffset", timeoffset); vm.setPlatform(conn, platform); } final String coresPerSocket = details.get("cpu.corespersocket"); if (coresPerSocket != null) { final Map<String, String> platform = vm.getPlatform(conn); platform.put("cores-per-socket", coresPerSocket); vm.setPlatform(conn, 
platform); } } if (!BootloaderType.CD.equals(vmSpec.getBootloader())) { final String xenservertoolsversion = details.get("hypervisortoolsversion"); if ((xenservertoolsversion == null || !xenservertoolsversion.equalsIgnoreCase("xenserver61")) && vmSpec.getGpuDevice() == null) { final Map<String, String> platform = vm.getPlatform(conn); platform.remove("device_id"); vm.setPlatform(conn, platform); } } } } /** * This method just creates a XenServer network following the tunnel network * naming convention */ public synchronized Network findOrCreateTunnelNetwork(final Connection conn, final String nwName) { try { Network nw = null; final Network.Record rec = new Network.Record(); final Set<Network> networks = Network.getByNameLabel(conn, nwName); if (networks.size() == 0) { rec.nameDescription = "tunnel network id# " + nwName; rec.nameLabel = nwName; // Initialize the ovs-host-setup to avoid error when doing // get-param in plugin final Map<String, String> otherConfig = new HashMap<String, String>(); otherConfig.put("ovs-host-setup", ""); // Mark 'internal network' as shared so bridge gets // automatically created on each host in the cluster // when VM with vif connected to this internal network is // started otherConfig.put("assume_network_is_shared", "true"); rec.otherConfig = otherConfig; nw = Network.create(conn, rec); s_logger.debug("### XenServer network for tunnels created:" + nwName); } else { nw = networks.iterator().next(); s_logger.debug("XenServer network for tunnels found:" + nwName); } return nw; } catch (final Exception e) { s_logger.warn("createTunnelNetwork failed", e); return null; } } void forceShutdownVM(final Connection conn, final VM vm) { try { final Long domId = vm.getDomid(conn); callHostPlugin(conn, "vmopspremium", "forceShutdownVM", "domId", domId.toString()); vm.powerStateReset(conn); vm.destroy(conn); } catch (final Exception e) { final String msg = "forceShutdown failed due to " + e.toString(); s_logger.warn(msg, e); throw new CloudRuntimeException(msg); } } protected String generateTimeStamp() { return new StringBuilder("CsCreateTime-").append(System.currentTimeMillis()).append("-").append(Rand.nextInt(Integer.MAX_VALUE)).toString(); } @Override public IAgentControl getAgentControl() { return _agentControl; } protected String getArgsString(final Map<String, String> args) { final StringBuilder argString = new StringBuilder(); for (final Map.Entry<String, String> arg : args.entrySet()) { argString.append(arg.getKey() + ": " + arg.getValue() + ", "); } return argString.toString(); } @Override public Map<String, Object> getConfigParams() { return null; } public Connection getConnection() { return ConnPool.connect(_host.getUuid(), _host.getPool(), _host.getIp(), _username, _password, _wait); } protected Pair<VM, VM.Record> getControlDomain(final Connection conn) throws XenAPIException, XmlRpcException { final Host host = Host.getByUuid(conn, _host.getUuid()); Set<VM> vms = null; vms = host.getResidentVMs(conn); for (final VM vm : vms) { if (vm.getIsControlDomain(conn)) { return new Pair<VM, VM.Record>(vm, vm.getRecord(conn)); } } throw new CloudRuntimeException("Com'on no control domain? 
What the crap?!#@!##$@"); } protected VIF getCorrectVif(final Connection conn, final VM router, final IpAddressTO ip) throws XmlRpcException, XenAPIException { final NicTO nic = new NicTO(); nic.setType(ip.getTrafficType()); nic.setName(ip.getNetworkName()); if (ip.getBroadcastUri() == null) { nic.setBroadcastType(BroadcastDomainType.Native); } else { final URI uri = BroadcastDomainType.fromString(ip.getBroadcastUri()); nic.setBroadcastType(BroadcastDomainType.getSchemeValue(uri)); nic.setBroadcastUri(uri); } final Network network = getNetwork(conn, nic); // Determine the correct VIF on DomR to associate/disassociate the // IP address with final Set<VIF> routerVIFs = router.getVIFs(conn); for (final VIF vif : routerVIFs) { final Network vifNetwork = vif.getNetwork(conn); if (vifNetwork.getUuid(conn).equals(network.getUuid(conn))) { return vif; } } return null; } protected VIF getCorrectVif(final Connection conn, final VM router, final Network network) throws XmlRpcException, XenAPIException { final Set<VIF> routerVIFs = router.getVIFs(conn); for (final VIF vif : routerVIFs) { final Network vifNetwork = vif.getNetwork(conn); if (vifNetwork.getUuid(conn).equals(network.getUuid(conn))) { return vif; } } return null; } @Override public PingCommand getCurrentStatus(final long id) { try { if (!pingXAPI()) { Thread.sleep(1000); if (!pingXAPI()) { s_logger.warn("can not ping xenserver " + _host.getUuid()); return null; } } final Connection conn = getConnection(); if (!_canBridgeFirewall && !_isOvs) { return new PingRoutingCommand(getType(), id, getHostVmStateReport(conn)); } else if (_isOvs) { final List<Pair<String, Long>> ovsStates = ovsFullSyncStates(); return new PingRoutingWithOvsCommand(getType(), id, getHostVmStateReport(conn), ovsStates); } else { final HashMap<String, Pair<Long, Long>> nwGrpStates = syncNetworkGroups(conn, id); return new PingRoutingWithNwGroupsCommand(getType(), id, getHostVmStateReport(conn), nwGrpStates); } } catch (final Exception e) { s_logger.warn("Unable to get current status", e); return null; } } protected double getDataAverage(final Node dataNode, final int col, final int numRows) { double value = 0; final double dummy = 0; int numRowsUsed = 0; for (int row = 0; row < numRows; row++) { final Node data = dataNode.getChildNodes().item(numRows - 1 - row).getChildNodes().item(col + 1); final Double currentDataAsDouble = Double.valueOf(getXMLNodeValue(data)); if (!currentDataAsDouble.equals(Double.NaN)) { numRowsUsed += 1; value += currentDataAsDouble; } } if (numRowsUsed == 0) { if (!Double.isInfinite(value) && !Double.isNaN(value)) { return value; } else { s_logger.warn("Found an invalid value (infinity/NaN) in getDataAverage(), numRows=0"); return dummy; } } else { if (!Double.isInfinite(value / numRowsUsed) && !Double.isNaN(value / numRowsUsed)) { return value / numRowsUsed; } else { s_logger.warn("Found an invalid value (infinity/NaN) in getDataAverage(), numRows>0"); return dummy; } } } public HashMap<String, HashMap<String, VgpuTypesInfo>> getGPUGroupDetails(final Connection conn) throws XenAPIException, XmlRpcException { return null; } protected String getGuestOsType(String platformEmulator) { if (org.apache.commons.lang.StringUtils.isBlank(platformEmulator)) { s_logger.debug("no guest OS type, start it as HVM guest"); platformEmulator = "Other install media"; } return platformEmulator; } public XsHost getHost() { return _host; } public int getMigrateWait() { return _migratewait; } public StorageSubsystemCommandHandler getStorageHandler() { return 
storageHandler; } protected boolean getHostInfo(final Connection conn) throws IllegalArgumentException { try { final Host myself = Host.getByUuid(conn, _host.getUuid()); Set<HostCpu> hcs = null; for (int i = 0; i < 10; i++) { hcs = myself.getHostCPUs(conn); if (hcs != null) { _host.setCpus(hcs.size()); if (_host.getCpus() > 0) { break; } } Thread.sleep(5000); } if (_host.getCpus() <= 0) { throw new CloudRuntimeException("Cannot get the numbers of cpu from XenServer host " + _host.getIp()); } final Map<String, String> cpuInfo = myself.getCpuInfo(conn); if (cpuInfo.get("socket_count") != null) { _host.setCpuSockets(Integer.parseInt(cpuInfo.get("socket_count"))); } // would hcs be null we would have thrown an exception on condition // (_host.getCpus() <= 0) by now for (final HostCpu hc : hcs) { _host.setSpeed(hc.getSpeed(conn).intValue()); break; } final Host.Record hr = myself.getRecord(conn); _host.setProductVersion(CitrixHelper.getProductVersion(hr)); final XsLocalNetwork privateNic = getManagementNetwork(conn); _privateNetworkName = privateNic.getNetworkRecord(conn).nameLabel; _host.setPrivatePif(privateNic.getPifRecord(conn).uuid); _host.setPrivateNetwork(privateNic.getNetworkRecord(conn).uuid); _host.setSystemvmisouuid(null); XsLocalNetwork guestNic = null; if (_guestNetworkName != null && !_guestNetworkName.equals(_privateNetworkName)) { guestNic = getNetworkByName(conn, _guestNetworkName); if (guestNic == null) { s_logger.warn("Unable to find guest network " + _guestNetworkName); throw new IllegalArgumentException("Unable to find guest network " + _guestNetworkName + " for host " + _host.getIp()); } } else { guestNic = privateNic; _guestNetworkName = _privateNetworkName; } _host.setGuestNetwork(guestNic.getNetworkRecord(conn).uuid); _host.setGuestPif(guestNic.getPifRecord(conn).uuid); XsLocalNetwork publicNic = null; if (_publicNetworkName != null && !_publicNetworkName.equals(_guestNetworkName)) { publicNic = getNetworkByName(conn, _publicNetworkName); if (publicNic == null) { s_logger.warn("Unable to find public network " + _publicNetworkName + " for host " + _host.getIp()); throw new IllegalArgumentException("Unable to find public network " + _publicNetworkName + " for host " + _host.getIp()); } } else { publicNic = guestNic; _publicNetworkName = _guestNetworkName; } _host.setPublicPif(publicNic.getPifRecord(conn).uuid); _host.setPublicNetwork(publicNic.getNetworkRecord(conn).uuid); if (_storageNetworkName1 == null) { _storageNetworkName1 = _guestNetworkName; } XsLocalNetwork storageNic1 = null; storageNic1 = getNetworkByName(conn, _storageNetworkName1); if (storageNic1 == null) { s_logger.warn("Unable to find storage network " + _storageNetworkName1 + " for host " + _host.getIp()); throw new IllegalArgumentException("Unable to find storage network " + _storageNetworkName1 + " for host " + _host.getIp()); } else { _host.setStorageNetwork1(storageNic1.getNetworkRecord(conn).uuid); _host.setStoragePif1(storageNic1.getPifRecord(conn).uuid); } XsLocalNetwork storageNic2 = null; if (_storageNetworkName2 != null) { storageNic2 = getNetworkByName(conn, _storageNetworkName2); if (storageNic2 != null) { _host.setStoragePif2(storageNic2.getPifRecord(conn).uuid); } } s_logger.info("XenServer Version is " + _host.getProductVersion() + " for host " + _host.getIp()); s_logger.info("Private Network is " + _privateNetworkName + " for host " + _host.getIp()); s_logger.info("Guest Network is " + _guestNetworkName + " for host " + _host.getIp()); s_logger.info("Public Network is " + 
_publicNetworkName + " for host " + _host.getIp()); return true; } catch (final XenAPIException e) { s_logger.warn("Unable to get host information for " + _host.getIp(), e); return false; } catch (final Exception e) { s_logger.warn("Unable to get host information for " + _host.getIp(), e); return false; } } public HostStatsEntry getHostStats(final Connection conn, final GetHostStatsCommand cmd, final String hostGuid, final long hostId) { final HostStatsEntry hostStats = new HostStatsEntry(hostId, 0, 0, 0, "host", 0, 0, 0, 0); final Object[] rrdData = getRRDData(conn, 1); // call rrd method with 1 // for host if (rrdData == null) { return null; } final Integer numRows = (Integer)rrdData[0]; final Integer numColumns = (Integer)rrdData[1]; final Node legend = (Node)rrdData[2]; final Node dataNode = (Node)rrdData[3]; final NodeList legendChildren = legend.getChildNodes(); for (int col = 0; col < numColumns; col++) { if (legendChildren == null || legendChildren.item(col) == null) { continue; } final String columnMetadata = getXMLNodeValue(legendChildren.item(col)); if (columnMetadata == null) { continue; } final String[] columnMetadataList = columnMetadata.split(":"); if (columnMetadataList.length != 4) { continue; } final String type = columnMetadataList[1]; final String param = columnMetadataList[3]; if (type.equalsIgnoreCase("host")) { if (param.matches("pif_eth0_rx")) { hostStats.setNetworkReadKBs(getDataAverage(dataNode, col, numRows) / 1000); } else if (param.matches("pif_eth0_tx")) { hostStats.setNetworkWriteKBs(getDataAverage(dataNode, col, numRows) / 1000); } else if (param.contains("memory_total_kib")) { hostStats.setTotalMemoryKBs(getDataAverage(dataNode, col, numRows)); } else if (param.contains("memory_free_kib")) { hostStats.setFreeMemoryKBs(getDataAverage(dataNode, col, numRows)); } else if (param.matches("cpu_avg")) { // hostStats.setNumCpus(hostStats.getNumCpus() + 1); hostStats.setCpuUtilization(hostStats.getCpuUtilization() + getDataAverage(dataNode, col, numRows)); } /* * if (param.contains("loadavg")) { * hostStats.setAverageLoad((hostStats.getAverageLoad() + * getDataAverage(dataNode, col, numRows))); } */ } } // add the host cpu utilization /* * if (hostStats.getNumCpus() != 0) { * hostStats.setCpuUtilization(hostStats.getCpuUtilization() / * hostStats.getNumCpus()); s_logger.debug("Host cpu utilization " + * hostStats.getCpuUtilization()); } */ return hostStats; } protected HashMap<String, HostVmStateReportEntry> getHostVmStateReport(final Connection conn) { final HashMap<String, HostVmStateReportEntry> vmStates = new HashMap<String, HostVmStateReportEntry>(); Map<VM, VM.Record> vm_map = null; for (int i = 0; i < 2; i++) { try { vm_map = VM.getAllRecords(conn); break; } catch (final Throwable e) { s_logger.warn("Unable to get vms", e); } try { Thread.sleep(1000); } catch (final InterruptedException ex) { } } if (vm_map == null) { return vmStates; } for (final VM.Record record : vm_map.values()) { if (record.isControlDomain || record.isASnapshot || record.isATemplate) { continue; // Skip DOM0 } final VmPowerState ps = record.powerState; final Host host = record.residentOn; String host_uuid = null; if (!isRefNull(host)) { try { host_uuid = host.getUuid(conn); } catch (final BadServerResponse e) { s_logger.error("Failed to get host uuid for host " + host.toWireString(), e); } catch (final XenAPIException e) { s_logger.error("Failed to get host uuid for host " + host.toWireString(), e); } catch (final XmlRpcException e) { s_logger.error("Failed to get host uuid for host " + 
host.toWireString(), e); } if (host_uuid.equalsIgnoreCase(_host.getUuid())) { vmStates.put(record.nameLabel, new HostVmStateReportEntry(convertToPowerState(ps), host_uuid)); } } } return vmStates; } public SR getIscsiSR(final Connection conn, final String srNameLabel, final String target, String path, final String chapInitiatorUsername, final String chapInitiatorPassword, final boolean ignoreIntroduceException) { return getIscsiSR(conn, srNameLabel, target, path, chapInitiatorUsername, chapInitiatorPassword, false, SRType.LVMOISCSI.toString(), ignoreIntroduceException); } public SR getIscsiSR(final Connection conn, final String srNameLabel, final String target, String path, final String chapInitiatorUsername, final String chapInitiatorPassword, final boolean resignature, final boolean ignoreIntroduceException) { return getIscsiSR(conn, srNameLabel, target, path, chapInitiatorUsername, chapInitiatorPassword, resignature, SRType.LVMOISCSI.toString(), ignoreIntroduceException); } public SR getIscsiSR(final Connection conn, final String srNameLabel, final String target, String path, final String chapInitiatorUsername, final String chapInitiatorPassword, final boolean resignature, final String srType, final boolean ignoreIntroduceException) { synchronized (srNameLabel.intern()) { final Map<String, String> deviceConfig = new HashMap<String, String>(); try { if (path.endsWith("/")) { path = path.substring(0, path.length() - 1); } final String tmp[] = path.split("/"); if (tmp.length != 3) { final String msg = "Wrong iscsi path " + path + " it should be /targetIQN/LUN"; s_logger.warn(msg); throw new CloudRuntimeException(msg); } final String targetiqn = tmp[1].trim(); final String lunid = tmp[2].trim(); String scsiid = ""; //Throws an exception if SR already exists and is attached checkIfIscsiSrExisits(conn, srNameLabel, target, targetiqn, lunid); // We now know the SR is not attached to the XenServer. We probe the // LUN to see if an SR was already exists on it, if so, we just // attach it or else we create a brand new SR deviceConfig.put("target", target); deviceConfig.put("targetIQN", targetiqn); if (StringUtils.isNotBlank(chapInitiatorUsername) && StringUtils.isNotBlank(chapInitiatorPassword)) { deviceConfig.put("chapuser", chapInitiatorUsername); deviceConfig.put("chappassword", chapInitiatorPassword); } final Host host = Host.getByUuid(conn, _host.getUuid()); final Map<String, String> smConfig = new HashMap<String, String>(); SR sr = null; String pooluuid = null; if (SRType.LVMOISCSI.equals(srType)) { scsiid = probeScisiId(conn, host, deviceConfig, srType, srNameLabel, lunid, smConfig); deviceConfig.put("SCSIid", scsiid); String result = SR.probe(conn, host, deviceConfig, srType, smConfig); if (result.indexOf("<UUID>") != -1) { pooluuid = result.substring(result.indexOf("<UUID>") + 6, result.indexOf("</UUID>")).trim(); } } if (pooluuid == null || pooluuid.length() != 36) { sr = SR.create(conn, host, deviceConfig, new Long(0), srNameLabel, srNameLabel, srType, "user", true, smConfig); } else { if (resignature) { // We resignature the SR for managed storage if needed. At the end of this // we have an SR which is ready to be attached. 
For VHDoISCSI SR, // we don't need to resignature pooluuid = resignatureIscsiSr(conn, host, deviceConfig, srNameLabel, smConfig); } sr = introduceAndPlugIscsiSr(conn, pooluuid, srNameLabel, srType, smConfig, deviceConfig, ignoreIntroduceException); } sr.scan(conn); return sr; } catch (final XenAPIException e) { final String msg = "Unable to create Iscsi SR " + deviceConfig + " due to " + e.toString(); s_logger.warn(msg, e); throw new CloudRuntimeException(msg, e); } catch (final Exception e) { final String msg = "Unable to create Iscsi SR " + deviceConfig + " due to " + e.getMessage(); s_logger.warn(msg, e); throw new CloudRuntimeException(msg, e); } } } private SR introduceAndPlugIscsiSr(Connection conn, String pooluuid, String srNameLabel, String type, Map<String, String> smConfig, Map<String, String> deviceConfig, boolean ignoreIntroduceException) throws XmlRpcException, XenAPIException { SR sr = null; try { sr = SR.introduce(conn, pooluuid, srNameLabel, srNameLabel, type, "user", true, smConfig); } catch (final XenAPIException ex) { if (ignoreIntroduceException) { return sr; } throw ex; } final Set<Host> setHosts = Host.getAll(conn); if (setHosts == null) { final String msg = "Unable to create iSCSI SR " + deviceConfig + " due to hosts not available."; s_logger.warn(msg); throw new CloudRuntimeException(msg); } for (final Host currentHost : setHosts) { final PBD.Record rec = new PBD.Record(); rec.deviceConfig = deviceConfig; rec.host = currentHost; rec.SR = sr; final PBD pbd = PBD.create(conn, rec); pbd.plug(conn); } return sr; } private String resignatureIscsiSr(Connection conn, Host host, Map<String, String> deviceConfig, String srNameLabel, Map<String, String> smConfig) throws XmlRpcException, XenAPIException { String pooluuid; try { SR.create(conn, host, deviceConfig, new Long(0), srNameLabel, srNameLabel, SRType.RELVMOISCSI.toString(), "user", true, smConfig); // The successful outcome of SR.create (right above) is to throw an exception of type XenAPIException (with expected // toString() text) after resigning the metadata (we indicated to perform a resign by passing in SRType.RELVMOISCSI.toString()). // That being the case, if this CloudRuntimeException statement is executed, there appears to have been some kind // of failure in the execution of the above SR.create (resign) method. 
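// To make the control flow explicit: the expected, successful path is the catch block below.
// SR.create() invoked with SRType.RELVMOISCSI is expected to throw a XenAPIException whose
// message contains "successfully resigned"; the handler then re-probes the LUN as an
// LVMOISCSI SR and extracts the pool UUID from the <UUID>...</UUID> element of the probe result.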
throw new CloudRuntimeException("Problem resigning the metadata"); } catch (XenAPIException ex) { String msg = ex.toString(); if (!msg.contains("successfully resigned")) { throw ex; } String type = SRType.LVMOISCSI.toString(); String result = SR.probe(conn, host, deviceConfig, type, smConfig); pooluuid = null; if (result.indexOf("<UUID>") != -1) { pooluuid = result.substring(result.indexOf("<UUID>") + 6, result.indexOf("</UUID>")).trim(); } if (pooluuid == null || pooluuid.length() != 36) { throw new CloudRuntimeException("Non-existent or invalid SR UUID"); } } return pooluuid; } private void checkIfIscsiSrExisits(Connection conn, String srNameLabel, String target, String targetiqn, String lunid) throws XenAPIException, XmlRpcException { final Set<SR> srs = SR.getByNameLabel(conn, srNameLabel); for (final SR sr : srs) { if (!(SRType.LVMOISCSI.equals(sr.getType(conn)))) { continue; } final Set<PBD> pbds = sr.getPBDs(conn); if (pbds.isEmpty()) { continue; } final PBD pbd = pbds.iterator().next(); final Map<String, String> dc = pbd.getDeviceConfig(conn); if (dc == null) { continue; } if (dc.get("target") == null) { continue; } if (dc.get("targetIQN") == null) { continue; } if (dc.get("lunid") == null) { continue; } if (target.equals(dc.get("target")) && targetiqn.equals(dc.get("targetIQN")) && lunid.equals(dc.get("lunid"))) { throw new CloudRuntimeException("There is a SR using the same configuration target:" + dc.get("target") + ", targetIQN:" + dc.get("targetIQN") + ", lunid:" + dc.get("lunid") + " for pool " + srNameLabel + "on host:" + _host.getUuid()); } } } private String probeScisiId(Connection conn, Host host, Map<String, String> deviceConfig, String type, String srNameLabel, String lunid, Map<String, String> smConfig) throws XenAPIException, XmlRpcException { String scsiid = null; try { SR.create(conn, host, deviceConfig, new Long(0), srNameLabel, srNameLabel, type, "user", true, smConfig); } catch (final XenAPIException e) { final String errmsg = e.toString(); if (errmsg.contains("SR_BACKEND_FAILURE_107")) { final String lun[] = errmsg.split("<LUN>"); boolean found = false; for (int i = 1; i < lun.length; i++) { final int blunindex = lun[i].indexOf("<LUNid>") + 7; final int elunindex = lun[i].indexOf("</LUNid>"); String ilun = lun[i].substring(blunindex, elunindex); ilun = ilun.trim(); if (ilun.equals(lunid)) { final int bscsiindex = lun[i].indexOf("<SCSIid>") + 8; final int escsiindex = lun[i].indexOf("</SCSIid>"); scsiid = lun[i].substring(bscsiindex, escsiindex); scsiid = scsiid.trim(); found = true; break; } } if (!found) { final String msg = "can not find LUN " + lunid + " in " + errmsg; s_logger.warn(msg); throw new CloudRuntimeException(msg); } } else { final String msg = "Unable to create Iscsi SR " + deviceConfig + " due to " + e.toString(); s_logger.warn(msg, e); throw new CloudRuntimeException(msg, e); } } return scsiid; } public SR getISOSRbyVmName(final Connection conn, final String vmName) { try { final Set<SR> srs = SR.getByNameLabel(conn, vmName + "-ISO"); if (srs.size() == 0) { return null; } else if (srs.size() == 1) { return srs.iterator().next(); } else { final String msg = "getIsoSRbyVmName failed due to there are more than 1 SR having same Label"; s_logger.warn(msg); } } catch (final XenAPIException e) { final String msg = "getIsoSRbyVmName failed due to " + e.toString(); s_logger.warn(msg, e); } catch (final Exception e) { final String msg = "getIsoSRbyVmName failed due to " + e.getMessage(); s_logger.warn(msg, e); } return null; } public VDI 
getIsoVDIByURL(final Connection conn, final String vmName, final String isoURL) { SR isoSR = null; String mountpoint = null; if (isoURL.startsWith("xs-tools")) { try { final String actualIsoURL = getActualIsoTemplate(conn); final Set<VDI> vdis = VDI.getByNameLabel(conn, actualIsoURL); if (vdis.isEmpty()) { throw new CloudRuntimeException("Could not find ISO with URL: " + actualIsoURL); } return vdis.iterator().next(); } catch (final XenAPIException e) { throw new CloudRuntimeException("Unable to get pv iso: " + isoURL + " due to " + e.toString()); } catch (final Exception e) { throw new CloudRuntimeException("Unable to get pv iso: " + isoURL + " due to " + e.toString()); } } final int index = isoURL.lastIndexOf("/"); mountpoint = isoURL.substring(0, index); URI uri; try { uri = new URI(mountpoint); } catch (final URISyntaxException e) { throw new CloudRuntimeException("isoURL is wrong: " + isoURL); } isoSR = getISOSRbyVmName(conn, vmName); if (isoSR == null) { isoSR = createIsoSRbyURI(conn, uri, vmName, false); } final String isoName = isoURL.substring(index + 1); final VDI isoVDI = getVDIbyLocationandSR(conn, isoName, isoSR); if (isoVDI != null) { return isoVDI; } else { throw new CloudRuntimeException("Could not find ISO with URL: " + isoURL); } } /** * Retrieve the actual ISO 'name-label' to be used. * We based our decision on XenServer version. * <ul> * <li> for XenServer 7.0+, we use {@value #xenServer70plusGuestToolsName}; * <li> for versions before 7.0, we use {@value #xenServerBefore70GuestToolsName}. * </ul> * * For XCP we always use {@value #xenServerBefore70GuestToolsName}. */ protected String getActualIsoTemplate(Connection conn) throws XenAPIException, XmlRpcException { Host host = Host.getByUuid(conn, _host.getUuid()); Host.Record record = host.getRecord(conn); String xenBrand = record.softwareVersion.get("product_brand"); String xenVersion = record.softwareVersion.get("product_version"); String[] items = xenVersion.split("\\."); if ((xenBrand.equals("XenServer") || xenBrand.equals("XCP-ng")) && Integer.parseInt(items[0]) >= 7) { return xenServer70plusGuestToolsName; } return xenServerBefore70GuestToolsName; } public String getLabel() { final Connection conn = getConnection(); final String result = callHostPlugin(conn, "ovstunnel", "getLabel"); return result; } public String getLowestAvailableVIFDeviceNum(final Connection conn, final VM vm) { String vmName = ""; try { vmName = vm.getNameLabel(conn); final List<Integer> usedDeviceNums = new ArrayList<Integer>(); final Set<VIF> vifs = vm.getVIFs(conn); final Iterator<VIF> vifIter = vifs.iterator(); while (vifIter.hasNext()) { final VIF vif = vifIter.next(); try { final String deviceId = vif.getDevice(conn); if (vm.getIsControlDomain(conn) || vif.getCurrentlyAttached(conn)) { usedDeviceNums.add(Integer.valueOf(deviceId)); } else { s_logger.debug("Found unplugged VIF " + deviceId + " in VM " + vmName + " destroy it"); vif.destroy(conn); } } catch (final NumberFormatException e) { final String msg = "Obtained an invalid value for an allocated VIF device number for VM: " + vmName; s_logger.debug(msg, e); throw new CloudRuntimeException(msg); } } for (Integer i = 0; i < _maxNics; i++) { if (!usedDeviceNums.contains(i)) { s_logger.debug("Lowest available Vif device number: " + i + " for VM: " + vmName); return i.toString(); } } } catch (final XmlRpcException e) { final String msg = "Caught XmlRpcException: " + e.getMessage(); s_logger.warn(msg, e); } catch (final XenAPIException e) { final String msg = "Caught XenAPIException: " + 
e.toString(); s_logger.warn(msg, e); } throw new CloudRuntimeException("Could not find available VIF slot in VM with name: " + vmName); } protected XsLocalNetwork getManagementNetwork(final Connection conn) throws XmlRpcException, XenAPIException { PIF mgmtPif = null; PIF.Record mgmtPifRec = null; final Host host = Host.getByUuid(conn, _host.getUuid()); final Set<PIF> hostPifs = host.getPIFs(conn); for (final PIF pif : hostPifs) { final PIF.Record rec = pif.getRecord(conn); if (rec.management) { if (rec.VLAN != null && rec.VLAN != -1) { final String msg = new StringBuilder("Unsupported configuration. Management network is on a VLAN. host=").append(_host.getUuid()).append("; pif=").append(rec.uuid) .append("; vlan=").append(rec.VLAN).toString(); s_logger.warn(msg); throw new CloudRuntimeException(msg); } if (s_logger.isDebugEnabled()) { s_logger.debug("Management network is on pif=" + rec.uuid); } mgmtPif = pif; mgmtPifRec = rec; break; } } if (mgmtPif == null) { final String msg = "Unable to find management network for " + _host.getUuid(); s_logger.warn(msg); throw new CloudRuntimeException(msg); } final Bond bond = mgmtPifRec.bondSlaveOf; if (!isRefNull(bond)) { final String msg = "Management interface is on slave(" + mgmtPifRec.uuid + ") of bond(" + bond.getUuid(conn) + ") on host(" + _host.getUuid() + "), please move management interface to bond!"; s_logger.warn(msg); throw new CloudRuntimeException(msg); } final Network nk = mgmtPifRec.network; final Network.Record nkRec = nk.getRecord(conn); return new XsLocalNetwork(this, nk, nkRec, mgmtPif, mgmtPifRec); } @Override public String getName() { return _name; } public XsLocalNetwork getNativeNetworkForTraffic(final Connection conn, final TrafficType type, final String name) throws XenAPIException, XmlRpcException { if (name != null) { if (s_logger.isDebugEnabled()) { s_logger.debug("Looking for network named " + name); } return getNetworkByName(conn, name); } if (type == TrafficType.Guest) { return new XsLocalNetwork(this, Network.getByUuid(conn, _host.getGuestNetwork()), null, PIF.getByUuid(conn, _host.getGuestPif()), null); } else if (type == TrafficType.Control) { setupLinkLocalNetwork(conn); return new XsLocalNetwork(this, Network.getByUuid(conn, _host.getLinkLocalNetwork())); } else if (type == TrafficType.Management) { return new XsLocalNetwork(this, Network.getByUuid(conn, _host.getPrivateNetwork()), null, PIF.getByUuid(conn, _host.getPrivatePif()), null); } else if (type == TrafficType.Public) { return new XsLocalNetwork(this, Network.getByUuid(conn, _host.getPublicNetwork()), null, PIF.getByUuid(conn, _host.getPublicPif()), null); } else if (type == TrafficType.Storage) { /* * TrafficType.Storage is for secondary storage, while * storageNetwork1 is for primary storage, we need better name here */ return new XsLocalNetwork(this, Network.getByUuid(conn, _host.getStorageNetwork1()), null, PIF.getByUuid(conn, _host.getStoragePif1()), null); } throw new CloudRuntimeException("Unsupported network type: " + type); } public Network getNetwork(final Connection conn, final NicTO nic) throws XenAPIException, XmlRpcException { final String name = nic.getName(); final XsLocalNetwork network = getNativeNetworkForTraffic(conn, nic.getType(), name); if (network == null) { s_logger.error("Network is not configured on the backend for nic " + nic.toString()); throw new CloudRuntimeException("Network for the backend is not configured correctly for network broadcast domain: " + nic.getBroadcastUri()); } final URI uri = nic.getBroadcastUri(); final 
BroadcastDomainType type = nic.getBroadcastType(); if (uri != null && uri.toString().contains("untagged")) { return network.getNetwork(); } else if (uri != null && type == BroadcastDomainType.Vlan) { assert BroadcastDomainType.getSchemeValue(uri) == BroadcastDomainType.Vlan; final long vlan = Long.parseLong(BroadcastDomainType.getValue(uri)); return enableVlanNetwork(conn, vlan, network); } else if (type == BroadcastDomainType.Native || type == BroadcastDomainType.LinkLocal) { return network.getNetwork(); } else if (uri != null && type == BroadcastDomainType.Vswitch) { final String header = uri.toString().substring(Networks.BroadcastDomainType.Vswitch.scheme().length() + "://".length()); if (header.startsWith("vlan")) { _isOvs = true; return setupvSwitchNetwork(conn); } else { return findOrCreateTunnelNetwork(conn, getOvsTunnelNetworkName(uri.getAuthority())); } } else if (type == BroadcastDomainType.Storage) { if (uri == null) { return network.getNetwork(); } else { final long vlan = Long.parseLong(BroadcastDomainType.getValue(uri)); return enableVlanNetwork(conn, vlan, network); } } else if (type == BroadcastDomainType.Lswitch) { // Nicira Logical Switch return network.getNetwork(); } else if (uri != null && type == BroadcastDomainType.Pvlan) { assert BroadcastDomainType.getSchemeValue(uri) == BroadcastDomainType.Pvlan; // should we consider moving this NetUtils method to // BroadcastDomainType? final long vlan = Long.parseLong(NetUtils.getPrimaryPvlanFromUri(uri)); return enableVlanNetwork(conn, vlan, network); } throw new CloudRuntimeException("Unable to support this type of network broadcast domain: " + nic.getBroadcastUri()); } /** * getNetworkByName() retrieves what the server thinks is the actual network * used by the XenServer host. This method should always be used to talk to * retrieve a network by the name. The reason is because of the problems in * using the name label as the way to find the Network. * * To see how we are working around these problems, take a look at * enableVlanNetwork(). The following description assumes you have looked at * the description on that method. * * In order to understand this, we have to see what type of networks are * within a XenServer that's under CloudStack control. * * - Native Networks: these are networks that are untagged on the XenServer * and are used to crate VLAN networks on. These are created by the user and * is assumed to be one per cluster. - VLAN Networks: these are dynamically * created by CloudStack and can have problems with duplicated names. - * LinkLocal Networks: these are dynamically created by CloudStack and can * also have problems with duplicated names but these don't have actual * PIFs. * * In order to speed to retrieval of a network, we do the following: - We * retrieve by the name. If only one network is retrieved, we assume we * retrieved the right network. - If more than one network is retrieved, we * check to see which one has the pif for the local host and use that. - If * a pif is not found, then we look at the tags and find the one with the * lowest timestamp. (See enableVlanNetwork()) * * @param conn * Xapi connection * @param name * name of the network * @return XsNic an object that contains network, network record, pif, and * pif record. 
* @throws XenAPIException * @throws XmlRpcException * * @see CitrixResourceBase#enableVlanNetwork */ public XsLocalNetwork getNetworkByName(final Connection conn, final String name) throws XenAPIException, XmlRpcException { final Set<Network> networks = Network.getByNameLabel(conn, name); if (networks.size() == 1) { return new XsLocalNetwork(this, networks.iterator().next(), null, null, null); } if (networks.size() == 0) { return null; } if (s_logger.isDebugEnabled()) { s_logger.debug("Found more than one network with the name " + name); } Network earliestNetwork = null; Network.Record earliestNetworkRecord = null; long earliestTimestamp = Long.MAX_VALUE; int earliestRandom = Integer.MAX_VALUE; for (final Network network : networks) { final XsLocalNetwork nic = new XsLocalNetwork(this, network); if (nic.getPif(conn) != null) { return nic; } final Network.Record record = network.getRecord(conn); if (record.tags != null) { for (final String tag : record.tags) { final Pair<Long, Integer> stamp = parseTimestamp(tag); if (stamp == null) { continue; } if (stamp.first() < earliestTimestamp || stamp.first() == earliestTimestamp && stamp.second() < earliestRandom) { earliestTimestamp = stamp.first(); earliestRandom = stamp.second(); earliestNetwork = network; earliestNetworkRecord = record; } } } } return earliestNetwork != null ? new XsLocalNetwork(this, earliestNetwork, earliestNetworkRecord, null, null) : null; } public long[] getNetworkStats(final Connection conn, final String privateIP) { final String result = networkUsage(conn, privateIP, "get", null); final long[] stats = new long[2]; if (result != null) { final String[] splitResult = result.split(":"); int i = 0; while (i < splitResult.length - 1) { stats[0] += Long.parseLong(splitResult[i++]); stats[1] += Long.parseLong(splitResult[i++]); } } return stats; } public SR getNfsSR(final Connection conn, final String poolid, final String uuid, final String server, String serverpath, final String pooldesc) { final Map<String, String> deviceConfig = new HashMap<String, String>(); try { serverpath = serverpath.replace("//", "/"); final Set<SR> srs = SR.getAll(conn); if (srs != null && !srs.isEmpty()) { for (final SR sr : srs) { if (!SRType.NFS.equals(sr.getType(conn))) { continue; } final Set<PBD> pbds = sr.getPBDs(conn); if (pbds.isEmpty()) { continue; } final PBD pbd = pbds.iterator().next(); final Map<String, String> dc = pbd.getDeviceConfig(conn); if (dc == null) { continue; } if (dc.get("server") == null) { continue; } if (dc.get("serverpath") == null) { continue; } if (server.equals(dc.get("server")) && serverpath.equals(dc.get("serverpath"))) { throw new CloudRuntimeException( "There is a SR using the same configuration server:" + dc.get("server") + ", serverpath:" + dc.get("serverpath") + " for pool " + uuid + " on host:" + _host.getUuid()); } } } deviceConfig.put("server", server); deviceConfig.put("serverpath", serverpath); final Host host = Host.getByUuid(conn, _host.getUuid()); final Map<String, String> smConfig = new HashMap<String, String>(); smConfig.put("nosubdir", "true"); final SR sr = SR.create(conn, host, deviceConfig, new Long(0), uuid, poolid, SRType.NFS.toString(), "user", true, smConfig); sr.scan(conn); return sr; } catch (final XenAPIException e) { throw new CloudRuntimeException("Unable to create NFS SR " + pooldesc, e); } catch (final XmlRpcException e) { throw new CloudRuntimeException("Unable to create NFS SR " + pooldesc, e); } } private String getOvsTunnelNetworkName(final String broadcastUri) { if 
(broadcastUri.contains(".")) { final String[] parts = broadcastUri.split("\\."); return "OVS-DR-VPC-Bridge" + parts[0]; } else { try { return "OVSTunnel" + broadcastUri; } catch (final Exception e) { return null; } } } protected List<File> getPatchFiles() { String patch = getPatchFilePath(); String patchfilePath = Script.findScript("", patch); if (patchfilePath == null) { throw new CloudRuntimeException("Unable to find patch file " + patch); } List<File> files = new ArrayList<File>(); files.add(new File(patchfilePath)); return files; } protected abstract String getPatchFilePath(); public String getPerfMon(final Connection conn, final Map<String, String> params, final int wait) { String result = null; try { result = callHostPluginAsync(conn, "vmopspremium", "asmonitor", 60, params); if (result != null) { return result; } } catch (final Exception e) { s_logger.error("Can not get performance monitor for AS due to ", e); } return null; } protected Object[] getRRDData(final Connection conn, final int flag) { /* * Note: 1 => called from host, hence host stats 2 => called from vm, * hence vm stats */ Document doc = null; try { doc = getStatsRawXML(conn, flag == 1 ? true : false); } catch (final Exception e1) { s_logger.warn("Error whilst collecting raw stats from plugin: ", e1); return null; } if (doc == null) { // stats are null when the host plugin call fails // (host down state) return null; } final NodeList firstLevelChildren = doc.getChildNodes(); final NodeList secondLevelChildren = firstLevelChildren.item(0).getChildNodes(); final Node metaNode = secondLevelChildren.item(0); final Node dataNode = secondLevelChildren.item(1); Integer numRows = 0; Integer numColumns = 0; Node legend = null; final NodeList metaNodeChildren = metaNode.getChildNodes(); for (int i = 0; i < metaNodeChildren.getLength(); i++) { final Node n = metaNodeChildren.item(i); if (n.getNodeName().equals("rows")) { numRows = Integer.valueOf(getXMLNodeValue(n)); } else if (n.getNodeName().equals("columns")) { numColumns = Integer.valueOf(getXMLNodeValue(n)); } else if (n.getNodeName().equals("legend")) { legend = n; } } return new Object[] {numRows, numColumns, legend, dataNode}; } @Override public int getRunLevel() { return 0; } protected SR getSRByNameLabelandHost(final Connection conn, final String name) throws BadServerResponse, XenAPIException, XmlRpcException { final Set<SR> srs = SR.getByNameLabel(conn, name); SR ressr = null; for (final SR sr : srs) { Set<PBD> pbds; pbds = sr.getPBDs(conn); for (final PBD pbd : pbds) { final PBD.Record pbdr = pbd.getRecord(conn); if (pbdr.host != null && pbdr.host.getUuid(conn).equals(_host.getUuid())) { if (!pbdr.currentlyAttached) { pbd.plug(conn); } ressr = sr; break; } } } return ressr; } private long getStaticMax(final String os, final boolean b, final long dynamicMinRam, final long dynamicMaxRam, final long recommendedValue) { if (recommendedValue == 0) { s_logger.warn("No recommended value found for dynamic max, setting static max and dynamic max equal"); return dynamicMaxRam; } final long staticMax = Math.min(recommendedValue, 4l * dynamicMinRam); // XS // constraint // for // stability if (dynamicMaxRam > staticMax) { // XS contraint that dynamic max <= // static max s_logger.warn("dynamixMax " + dynamicMaxRam + " cant be greater than static max " + staticMax + ", can lead to stability issues. 
Setting static max as much as dynamic max "); return dynamicMaxRam; } return staticMax; } private long getStaticMin(final String os, final boolean b, final long dynamicMinRam, final long dynamicMaxRam, final long recommendedValue) { if (recommendedValue == 0) { s_logger.warn("No recommended value found for dynamic min"); return dynamicMinRam; } if (dynamicMinRam < recommendedValue) { // XS contraint that dynamic min // > static min s_logger.warn("Vm is set to dynamixMin " + dynamicMinRam + " less than the recommended static min " + recommendedValue + ", could lead to stability issues"); } return dynamicMinRam; } protected Document getStatsRawXML(final Connection conn, final boolean host) { final Date currentDate = new Date(); String urlStr = "http://" + _host.getIp() + "/rrd_updates?"; urlStr += "session_id=" + conn.getSessionReference(); urlStr += "&host=" + (host ? "true" : "false"); urlStr += "&cf=" + _consolidationFunction; urlStr += "&interval=" + _pollingIntervalInSeconds; urlStr += "&start=" + (currentDate.getTime() / 1000 - 1000 - 100); URL url; BufferedReader in = null; try { url = new URL(urlStr); url.openConnection(); final URLConnection uc = url.openConnection(); in = new BufferedReader(new InputStreamReader(uc.getInputStream())); final InputSource statsSource = new InputSource(in); return DocumentBuilderFactory.newInstance().newDocumentBuilder().parse(statsSource); } catch (final MalformedURLException e) { s_logger.warn("Malformed URL? come on...." + urlStr); return null; } catch (final IOException e) { s_logger.warn("Problems getting stats using " + urlStr, e); return null; } catch (final SAXException e) { s_logger.warn("Problems getting stats using " + urlStr, e); return null; } catch (final ParserConfigurationException e) { s_logger.warn("Problems getting stats using " + urlStr, e); return null; } finally { if (in != null) { try { in.close(); } catch (final IOException e) { s_logger.warn("Unable to close the buffer ", e); } } } } public SR getStorageRepository(final Connection conn, final String srNameLabel) { Set<SR> srs; try { srs = SR.getByNameLabel(conn, srNameLabel); } catch (final XenAPIException e) { throw new CloudRuntimeException("Unable to get SR " + srNameLabel + " due to " + e.toString(), e); } catch (final Exception e) { throw new CloudRuntimeException("Unable to get SR " + srNameLabel + " due to " + e.getMessage(), e); } if (srs.size() > 1) { throw new CloudRuntimeException("More than one storage repository was found for pool with uuid: " + srNameLabel); } else if (srs.size() == 1) { final SR sr = srs.iterator().next(); if (s_logger.isDebugEnabled()) { s_logger.debug("SR retrieved for " + srNameLabel); } if (checkSR(conn, sr)) { return sr; } throw new CloudRuntimeException("SR check failed for storage pool: " + srNameLabel + "on host:" + _host.getUuid()); } else { throw new CloudRuntimeException("Can not see storage pool: " + srNameLabel + " from on host:" + _host.getUuid()); } } protected Storage.StorageResourceType getStorageResourceType() { return Storage.StorageResourceType.STORAGE_POOL; } @Override public Type getType() { return com.cloud.host.Host.Type.Routing; } protected VDI getVDIbyLocationandSR(final Connection conn, final String loc, final SR sr) { try { final Set<VDI> vdis = sr.getVDIs(conn); for (final VDI vdi : vdis) { if (vdi.getLocation(conn).startsWith(loc)) { return vdi; } } final String msg = "can not getVDIbyLocationandSR " + loc; s_logger.warn(msg); return null; } catch (final XenAPIException e) { final String msg = "getVDIbyLocationandSR 
exception " + loc + " due to " + e.toString(); s_logger.warn(msg, e); throw new CloudRuntimeException(msg, e); } catch (final Exception e) { final String msg = "getVDIbyLocationandSR exception " + loc + " due to " + e.getMessage(); s_logger.warn(msg, e); throw new CloudRuntimeException(msg, e); } } public VDI getVDIbyUuid(final Connection conn, final String uuid) { return getVDIbyUuid(conn, uuid, true); } public VDI getVDIbyUuid(final Connection conn, final String uuid, final boolean throwExceptionIfNotFound) { try { return VDI.getByUuid(conn, uuid); } catch (final Exception e) { if (throwExceptionIfNotFound) { final String msg = "Catch Exception " + e.getClass().getName() + " :VDI getByUuid for uuid: " + uuid + " failed due to " + e.toString(); s_logger.debug(msg); throw new CloudRuntimeException(msg, e); } return null; } } public String getVhdParent(final Connection conn, final String primaryStorageSRUuid, final String snapshotUuid, final Boolean isISCSI) { final String parentUuid = callHostPlugin(conn, "vmopsSnapshot", "getVhdParent", "primaryStorageSRUuid", primaryStorageSRUuid, "snapshotUuid", snapshotUuid, "isISCSI", isISCSI.toString()); if (parentUuid == null || parentUuid.isEmpty() || parentUuid.equalsIgnoreCase("None")) { s_logger.debug("Unable to get parent of VHD " + snapshotUuid + " in SR " + primaryStorageSRUuid); // errString is already logged. return null; } return parentUuid; } public VIF getVifByMac(final Connection conn, final VM router, String mac) throws XmlRpcException, XenAPIException { final Set<VIF> routerVIFs = router.getVIFs(conn); mac = mac.trim(); for (final VIF vif : routerVIFs) { final String lmac = vif.getMAC(conn); if (lmac.trim().equals(mac)) { return vif; } } return null; } public VirtualRoutingResource getVirtualRoutingResource() { return _vrResource; } public VM getVM(final Connection conn, final String vmName) { // Look up VMs with the specified name Set<VM> vms; try { vms = VM.getByNameLabel(conn, vmName); } catch (final XenAPIException e) { throw new CloudRuntimeException("Unable to get " + vmName + ": " + e.toString(), e); } catch (final Exception e) { throw new CloudRuntimeException("Unable to get " + vmName + ": " + e.getMessage(), e); } // If there are no VMs, throw an exception if (vms.size() == 0) { throw new CloudRuntimeException("VM with name: " + vmName + " does not exist."); } // If there is more than one VM, print a warning if (vms.size() > 1) { s_logger.warn("Found " + vms.size() + " VMs with name: " + vmName); } // Return the first VM in the set return vms.iterator().next(); } public String getVMInstanceName() { return _instance; } public long getVMSnapshotChainSize(final Connection conn, final VolumeObjectTO volumeTo, final String vmName, final String vmSnapshotName) throws BadServerResponse, XenAPIException, XmlRpcException { if (volumeTo.getVolumeType() == Volume.Type.DATADISK) { final VDI dataDisk = VDI.getByUuid(conn, volumeTo.getPath()); if (dataDisk != null) { final String dataDiskName = dataDisk.getNameLabel(conn); if (dataDiskName != null && !dataDiskName.isEmpty()) { volumeTo.setName(dataDiskName); } } } final Set<VDI> allvolumeVDIs = VDI.getByNameLabel(conn, volumeTo.getName()); long size = 0; for (final VDI vdi : allvolumeVDIs) { try { if (vdi.getIsASnapshot(conn) && vdi.getSmConfig(conn).get("vhd-parent") != null) { final String parentUuid = vdi.getSmConfig(conn).get("vhd-parent"); final VDI parentVDI = VDI.getByUuid(conn, parentUuid); // add size of snapshot vdi node, usually this only contains // meta data size = size + 
vdi.getPhysicalUtilisation(conn); // add size of snapshot vdi parent, this contains data if (!isRefNull(parentVDI)) { size = size + parentVDI.getPhysicalUtilisation(conn).longValue(); } } } catch (final Exception e) { s_logger.debug("Exception occurs when calculate snapshot capacity for volumes: due to " + e.toString()); continue; } } if (volumeTo.getVolumeType() == Volume.Type.ROOT) { VM vm = getVM(conn, vmName); if (vm != null) { Set<VM> vmSnapshots = vm.getSnapshots(conn); if (vmSnapshots != null) { for (VM vmsnap : vmSnapshots) { try { final String vmSnapName = vmsnap.getNameLabel(conn); s_logger.debug("snapname " + vmSnapName); if (vmSnapName != null && vmSnapName.contains(vmSnapshotName) && vmsnap.getIsASnapshot(conn)) { s_logger.debug("snapname " + vmSnapName + "isASnapshot"); VDI memoryVDI = vmsnap.getSuspendVDI(conn); if (!isRefNull(memoryVDI)) { size = size + memoryVDI.getPhysicalUtilisation(conn); s_logger.debug("memoryVDI size :" + size); String parentUuid = memoryVDI.getSmConfig(conn).get("vhd-parent"); VDI pMemoryVDI = VDI.getByUuid(conn, parentUuid); if (!isRefNull(pMemoryVDI)) { size = size + pMemoryVDI.getPhysicalUtilisation(conn); } s_logger.debug("memoryVDI size+parent :" + size); } } } catch (Exception e) { s_logger.debug("Exception occurs when calculate snapshot capacity for memory: due to " + e.toString()); continue; } } } } } return size; } public PowerState getVmState(final Connection conn, final String vmName) { int retry = 3; while (retry-- > 0) { try { final Set<VM> vms = VM.getByNameLabel(conn, vmName); for (final VM vm : vms) { return convertToPowerState(vm.getPowerState(conn)); } } catch (final BadServerResponse e) { // There is a race condition within xenserver such that if a vm // is // deleted and we // happen to ask for it, it throws this stupid response. So // if this happens, // we take a nap and try again which then avoids the race // condition because // the vm's information is now cleaned up by xenserver. The // error // is as follows // com.xensource.xenapi.Types$BadServerResponse // [HANDLE_INVALID, VM, // 3dde93f9-c1df-55a7-2cde-55e1dce431ab] s_logger.info("Unable to get a vm PowerState due to " + e.toString() + ". We are retrying. 
Count: " + retry); try { Thread.sleep(3000); } catch (final InterruptedException ex) { } } catch (final XenAPIException e) { final String msg = "Unable to get a vm PowerState due to " + e.toString(); s_logger.warn(msg, e); break; } catch (final XmlRpcException e) { final String msg = "Unable to get a vm PowerState due to " + e.getMessage(); s_logger.warn(msg, e); break; } } return PowerState.PowerOff; } public HashMap<String, VmStatsEntry> getVmStats(final Connection conn, final GetVmStatsCommand cmd, final List<String> vmUUIDs, final String hostGuid) { final HashMap<String, VmStatsEntry> vmResponseMap = new HashMap<String, VmStatsEntry>(); for (final String vmUUID : vmUUIDs) { vmResponseMap.put(vmUUID, new VmStatsEntry(0, 0, 0, 0, 0, 0, 0, "vm")); } final Object[] rrdData = getRRDData(conn, 2); // call rrddata with 2 for // vm if (rrdData == null) { return null; } final Integer numRows = (Integer)rrdData[0]; final Integer numColumns = (Integer)rrdData[1]; final Node legend = (Node)rrdData[2]; final Node dataNode = (Node)rrdData[3]; final NodeList legendChildren = legend.getChildNodes(); for (int col = 0; col < numColumns; col++) { if (legendChildren == null || legendChildren.item(col) == null) { continue; } final String columnMetadata = getXMLNodeValue(legendChildren.item(col)); if (columnMetadata == null) { continue; } final String[] columnMetadataList = columnMetadata.split(":"); if (columnMetadataList.length != 4) { continue; } final String type = columnMetadataList[1]; final String uuid = columnMetadataList[2]; final String param = columnMetadataList[3]; if (type.equals("vm") && vmResponseMap.keySet().contains(uuid)) { final VmStatsEntry vmStatsAnswer = vmResponseMap.get(uuid); vmStatsAnswer.setEntityType("vm"); if (param.contains("cpu")) { vmStatsAnswer.setNumCPUs(vmStatsAnswer.getNumCPUs() + 1); vmStatsAnswer.setCPUUtilization(vmStatsAnswer.getCPUUtilization() + getDataAverage(dataNode, col, numRows)); } else if (param.matches("vif_\\d*_rx")) { vmStatsAnswer.setNetworkReadKBs(vmStatsAnswer.getNetworkReadKBs() + getDataAverage(dataNode, col, numRows) / BASE_TO_CONVERT_BYTES_INTO_KILOBYTES); } else if (param.matches("vif_\\d*_tx")) { vmStatsAnswer.setNetworkWriteKBs(vmStatsAnswer.getNetworkWriteKBs() + getDataAverage(dataNode, col, numRows) / BASE_TO_CONVERT_BYTES_INTO_KILOBYTES); } else if (param.matches("vbd_.*_read")) { vmStatsAnswer.setDiskReadKBs(vmStatsAnswer.getDiskReadKBs() + getDataAverage(dataNode, col, numRows) / BASE_TO_CONVERT_BYTES_INTO_KILOBYTES); } else if (param.matches("vbd_.*_write")) { vmStatsAnswer.setDiskWriteKBs(vmStatsAnswer.getDiskWriteKBs() + getDataAverage(dataNode, col, numRows) / BASE_TO_CONVERT_BYTES_INTO_KILOBYTES); } else if (param.contains("memory_internal_free")) { vmStatsAnswer.setIntFreeMemoryKBs(vmStatsAnswer.getIntFreeMemoryKBs() + getDataAverage(dataNode, col, numRows) / BASE_TO_CONVERT_BYTES_INTO_KILOBYTES); } else if (param.contains("memory_target")) { vmStatsAnswer.setTargetMemoryKBs(vmStatsAnswer.getTargetMemoryKBs() + getDataAverage(dataNode, col, numRows) / BASE_TO_CONVERT_BYTES_INTO_KILOBYTES); } else if (param.contains("memory")) { vmStatsAnswer.setMemoryKBs(vmStatsAnswer.getMemoryKBs() + getDataAverage(dataNode, col, numRows) / BASE_TO_CONVERT_BYTES_INTO_KILOBYTES); } } } for (final Map.Entry<String, VmStatsEntry> entry : vmResponseMap.entrySet()) { final VmStatsEntry vmStatsAnswer = entry.getValue(); if (vmStatsAnswer.getNumCPUs() != 0) { vmStatsAnswer.setCPUUtilization(vmStatsAnswer.getCPUUtilization() / vmStatsAnswer.getNumCPUs()); } 
vmStatsAnswer.setCPUUtilization(vmStatsAnswer.getCPUUtilization() * 100); if (s_logger.isDebugEnabled()) { s_logger.debug("Vm cpu utilization " + vmStatsAnswer.getCPUUtilization()); } } return vmResponseMap; } public String getVncUrl(final Connection conn, final VM vm) { VM.Record record; Console c; try { record = vm.getRecord(conn); final Set<Console> consoles = record.consoles; if (consoles.isEmpty()) { s_logger.warn("There are no Consoles available to the vm : " + record.nameDescription); return null; } final Iterator<Console> i = consoles.iterator(); while (i.hasNext()) { c = i.next(); if (c.getProtocol(conn) == Types.ConsoleProtocol.RFB) { return c.getLocation(conn); } } } catch (final XenAPIException e) { final String msg = "Unable to get console url due to " + e.toString(); s_logger.warn(msg, e); return null; } catch (final XmlRpcException e) { final String msg = "Unable to get console url due to " + e.getMessage(); s_logger.warn(msg, e); return null; } return null; } protected String getXMLNodeValue(final Node n) { return n.getChildNodes().item(0).getNodeValue(); } public void handleSrAndVdiDetach(final String iqn, final Connection conn) throws Exception { final SR sr = getStorageRepository(conn, iqn); removeSR(conn, sr); } protected void destroyUnattachedVBD(Connection conn, VM vm) { try { for (VBD vbd : vm.getVBDs(conn)) { if (Types.VbdType.DISK.equals(vbd.getType(conn)) && !vbd.getCurrentlyAttached(conn)) { vbd.destroy(conn); } } } catch (final Exception e) { s_logger.debug("Failed to destroy unattached VBD due to ", e); } } public String handleVmStartFailure(final Connection conn, final String vmName, final VM vm, final String message, final Throwable th) { final String msg = "Unable to start " + vmName + " due to " + message; s_logger.warn(msg, th); if (vm == null) { return msg; } try { final VM.Record vmr = vm.getRecord(conn); final List<Network> networks = new ArrayList<Network>(); for (final VIF vif : vmr.VIFs) { try { final VIF.Record rec = vif.getRecord(conn); if (rec != null) { networks.add(rec.network); } else { s_logger.warn("Unable to cleanup VIF: " + vif.toWireString() + " As vif record is null"); } } catch (final Exception e) { s_logger.warn("Unable to cleanup VIF", e); } } if (vmr.powerState == VmPowerState.RUNNING) { try { vm.hardShutdown(conn); } catch (final Exception e) { s_logger.warn("VM hardshutdown failed due to ", e); } } if (vm.getPowerState(conn) == VmPowerState.HALTED) { try { vm.destroy(conn); } catch (final Exception e) { s_logger.warn("VM destroy failed due to ", e); } } for (final VBD vbd : vmr.VBDs) { try { vbd.unplug(conn); vbd.destroy(conn); } catch (final Exception e) { s_logger.warn("Unable to clean up VBD due to ", e); } } for (final VIF vif : vmr.VIFs) { try { vif.unplug(conn); vif.destroy(conn); } catch (final Exception e) { s_logger.warn("Unable to cleanup VIF", e); } } for (final Network network : networks) { if (network.getNameLabel(conn).startsWith("VLAN")) { disableVlanNetwork(conn, network); } } } catch (final Exception e) { s_logger.warn("VM getRecord failed due to ", e); } return msg; } @Override public StartupCommand[] initialize() throws IllegalArgumentException { final Connection conn = getConnection(); if (!getHostInfo(conn)) { s_logger.warn("Unable to get host information for " + _host.getIp()); return null; } final StartupRoutingCommand cmd = new StartupRoutingCommand(); fillHostInfo(conn, cmd); cmd.setHypervisorType(HypervisorType.XenServer); cmd.setCluster(_cluster); cmd.setPoolSync(false); try { final Pool pool = 
Pool.getByUuid(conn, _host.getPool()); final Pool.Record poolr = pool.getRecord(conn); poolr.master.getRecord(conn); } catch (final Throwable e) { s_logger.warn("Check for master failed, failing the FULL Cluster sync command"); } List<StartupStorageCommand> startUpLocalStorageCommands = null; try { startUpLocalStorageCommands = initializeLocalSrs(conn); } catch (XenAPIException | XmlRpcException e) { s_logger.warn("Could not initialize local SRs on host: " + _host.getUuid(), e); } if (CollectionUtils.isEmpty(startUpLocalStorageCommands)) { return new StartupCommand[] {cmd}; } return createStartupCommandsArray(cmd, startUpLocalStorageCommands); } /** * We simply create an array and add the {@link StartupRoutingCommand} as the first element of the array. Then, we add all elements from startUpLocalStorageCommands */ private StartupCommand[] createStartupCommandsArray(StartupRoutingCommand startupRoutingCommand, List<StartupStorageCommand> startUpLocalStorageCommands) { StartupCommand[] startupCommands = new StartupCommand[startUpLocalStorageCommands.size() + 1]; startupCommands[0] = startupRoutingCommand; for (int i = 1; i < startupCommands.length; i++) { startupCommands[i] = startUpLocalStorageCommands.get(i - 1); } return startupCommands; } /** * This method will return a list of all local SRs. * An SR is considered local if it meets all of the following criteria: * <ul> * <li> {@link Record#shared} is equal to false * <li> The PBDs of the SR ({@link Record#PBDs}) are connected to host {@link #_host} * <li> SR type is equal to the {@link SRType} sent as parameter * </ul> */ protected List<SR> getAllLocalSrForType(Connection conn, SRType srType) throws XenAPIException, XmlRpcException { List<SR> localSrs = new ArrayList<>(); Map<SR, SR.Record> allSrRecords = SR.getAllRecords(conn); if (MapUtils.isEmpty(allSrRecords)) { return localSrs; } for (Map.Entry<SR, SR.Record> entry : allSrRecords.entrySet()) { SR.Record srRec = entry.getValue(); if (!srType.equals(srRec.type)) { continue; } if (BooleanUtils.toBoolean(srRec.shared)) { continue; } Set<PBD> pbds = srRec.PBDs; if (CollectionUtils.isEmpty(pbds)) { continue; } for (PBD pbd : pbds) { Host host = pbd.getHost(conn); if (!isRefNull(host) && org.apache.commons.lang3.StringUtils.equals(host.getUuid(conn), _host.getUuid())) { if (!pbd.getCurrentlyAttached(conn)) { s_logger.debug(String.format("PBD [%s] of local SR [%s] was unplugged, pluggin it now", pbd.getUuid(conn), srRec.uuid)); pbd.plug(conn); } s_logger.debug("Scanning local SR: " + srRec.uuid); SR sr = entry.getKey(); sr.scan(conn); localSrs.add(sr); } } } s_logger.debug(String.format("Found %d local storage of type [%s] for host [%s]", localSrs.size(), srType.toString(), _host.getUuid())); return localSrs; } /** * This method will prepare Local SRs to be used by Apache CloudStack. */ protected List<StartupStorageCommand> initializeLocalSrs(Connection conn) throws XenAPIException, XmlRpcException { List<StartupStorageCommand> localStorageStartupCommands = new ArrayList<>(); List<SR> allLocalSrs = getAllLocalSrs(conn); for (SR sr : allLocalSrs) { long totalCapacity = sr.getPhysicalSize(conn); if (totalCapacity > 0) { StartupStorageCommand cmd = createStartUpStorageCommand(conn, sr); localStorageStartupCommands.add(cmd); } } return localStorageStartupCommands; } /** * This method will retrieve all Local SRs according to {@link #getAllLocalSrForType(Connection, SRType)}. * The types used are {@link SRType#LVM} and {@link SRType#EXT}. 
* */ protected List<SR> getAllLocalSrs(Connection conn) throws XenAPIException, XmlRpcException { List<SR> allLocalSrLvmType = getAllLocalSrForType(conn, SRType.LVM); List<SR> allLocalSrExtType = getAllLocalSrForType(conn, SRType.EXT); List<SR> allLocalSrs = new ArrayList<>(allLocalSrLvmType); allLocalSrs.addAll(allLocalSrExtType); return allLocalSrs; } /** * This method creates the StartUp storage command for the local SR. * We will configure 'name-label' and 'description' using {@link #configureStorageNameAndDescription(Connection, SR)}. * Then, we will create the POJO {@link StoragePoolInfo} with SR's information using method {@link #createStoragePoolInfo(Connection, SR)}. */ protected StartupStorageCommand createStartUpStorageCommand(Connection conn, SR sr) throws XenAPIException, XmlRpcException { configureStorageNameAndDescription(conn, sr); StoragePoolInfo storagePoolInfo = createStoragePoolInfo(conn, sr); StartupStorageCommand cmd = new StartupStorageCommand(); cmd.setPoolInfo(storagePoolInfo); cmd.setGuid(_host.getUuid()); cmd.setDataCenter(Long.toString(_dcId)); cmd.setResourceType(Storage.StorageResourceType.STORAGE_POOL); String.format("StartUp command created for local storage [%s] of type [%s] on host [%s]", storagePoolInfo.getUuid(), storagePoolInfo.getPoolType(), _host.getUuid()); return cmd; } /** * Instantiate {@link StoragePoolInfo} with SR's information. */ protected StoragePoolInfo createStoragePoolInfo(Connection conn, SR sr) throws XenAPIException, XmlRpcException { long totalCapacity = sr.getPhysicalSize(conn); String srUuid = sr.getUuid(conn); Host host = Host.getByUuid(conn, _host.getUuid()); String address = host.getAddress(conn); long availableCapacity = totalCapacity - sr.getPhysicalUtilisation(conn); String srType = sr.getType(conn).toUpperCase(); return new StoragePoolInfo(srUuid, address, srType, srType, StoragePoolType.valueOf(srType), totalCapacity, availableCapacity); } protected void configureStorageNameAndDescription(Connection conn, SR sr) throws XenAPIException, XmlRpcException { String srUuid = sr.getUuid(conn); sr.setNameLabel(conn, srUuid); String nameFormat = "Cloud Stack Local (%s) Storage Pool for %s"; sr.setNameDescription(conn, String.format(nameFormat, sr.getType(conn), _host.getUuid())); } public boolean isDeviceUsed(final Connection conn, final VM vm, final Long deviceId) { // Figure out the disk number to attach the VM to String msg = null; try { final Set<String> allowedVBDDevices = vm.getAllowedVBDDevices(conn); if (allowedVBDDevices.contains(deviceId.toString())) { return false; } return true; } catch (final XmlRpcException e) { msg = "Catch XmlRpcException due to: " + e.getMessage(); s_logger.warn(msg, e); } catch (final XenAPIException e) { msg = "Catch XenAPIException due to: " + e.toString(); s_logger.warn(msg, e); } throw new CloudRuntimeException("When check deviceId " + msg); } /** * When Dynamic Memory Control (DMC) is enabled - xenserver allows scaling * the guest memory while the guest is running * * By default this is disallowed, override the specific xenserver resource * if this is enabled */ public boolean isDmcEnabled(final Connection conn, final Host host) throws XenAPIException, XmlRpcException { return false; } public boolean IsISCSI(final String type) { return SRType.LVMOHBA.equals(type) || SRType.LVMOISCSI.equals(type) || SRType.LVM.equals(type); } public boolean isNetworkSetupByName(final String nameTag) throws XenAPIException, XmlRpcException { if (nameTag != null) { if (s_logger.isDebugEnabled()) { 
s_logger.debug("Looking for network setup by name " + nameTag); } final Connection conn = getConnection(); final XsLocalNetwork network = getNetworkByName(conn, nameTag); if (network == null) { return false; } } return true; } public boolean isOvs() { return _isOvs; } public boolean isRefNull(final XenAPIObject object) { return object == null || object.toWireString().equals("OpaqueRef:NULL") || object.toWireString().equals("<not in database>"); } public boolean isSecurityGroupEnabled() { return _securityGroupEnabled; } public boolean isXcp() { final Connection conn = getConnection(); final String result = callHostPlugin(conn, "ovstunnel", "is_xcp"); if (result.equals("XCP")) { return true; } return false; } boolean killCopyProcess(final Connection conn, final String nameLabel) { final String results = callHostPluginAsync(conn, "vmops", "kill_copy_process", 60, "namelabel", nameLabel); String errMsg = null; if (results == null || results.equals("false")) { errMsg = "kill_copy_process failed"; s_logger.warn(errMsg); return false; } else { return true; } } public boolean launchHeartBeat(final Connection conn) { final String result = callHostPluginPremium(conn, "heartbeat", "host", _host.getUuid(), "timeout", Integer.toString(_heartbeatTimeout), "interval", Integer.toString(_heartbeatInterval)); if (result == null || !result.contains("> DONE <")) { s_logger.warn("Unable to launch the heartbeat process on " + _host.getIp()); return false; } return true; } protected String logX(final XenAPIObject obj, final String msg) { return new StringBuilder("Host ").append(_host.getIp()).append(" ").append(obj.toWireString()).append(": ").append(msg).toString(); } public void migrateVM(final Connection conn, final Host destHost, final VM vm, final String vmName) throws Exception { Task task = null; try { final Map<String, String> other = new HashMap<String, String>(); other.put("live", "true"); task = vm.poolMigrateAsync(conn, destHost, other); try { // poll every 1 seconds final long timeout = _migratewait * 1000L; waitForTask(conn, task, 1000, timeout); checkForSuccess(conn, task); } catch (final Types.HandleInvalid e) { if (vm.getResidentOn(conn).equals(destHost)) { task = null; return; } throw new CloudRuntimeException("migrate VM catch HandleInvalid and VM is not running on dest host"); } } catch (final XenAPIException e) { final String msg = "Unable to migrate VM(" + vmName + ") from host(" + _host.getUuid() + ")"; s_logger.warn(msg, e); throw new CloudRuntimeException(msg); } finally { if (task != null) { try { task.destroy(conn); } catch (final Exception e1) { s_logger.debug("unable to destroy task(" + task.toString() + ") on host(" + _host.getUuid() + ") due to " + e1.toString()); } } } } protected VDI mount(final Connection conn, final StoragePoolType poolType, final String volumeFolder, final String volumePath) { return getVDIbyUuid(conn, volumePath); } protected VDI mount(final Connection conn, final String vmName, final DiskTO volume) throws XmlRpcException, XenAPIException { final DataTO data = volume.getData(); final Volume.Type type = volume.getType(); if (type == Volume.Type.ISO) { final TemplateObjectTO iso = (TemplateObjectTO)data; final DataStoreTO store = iso.getDataStore(); if (store == null) { // It's a fake iso return null; } // corer case, xenserver pv driver iso final String templateName = iso.getName(); if (templateName.startsWith("xs-tools")) { try { final String actualTemplateName = getActualIsoTemplate(conn); final Set<VDI> vdis = VDI.getByNameLabel(conn, actualTemplateName); 
if (vdis.isEmpty()) { throw new CloudRuntimeException("Could not find ISO with URL: " + actualTemplateName); } return vdis.iterator().next(); } catch (final XenAPIException e) { throw new CloudRuntimeException("Unable to get pv iso: " + templateName + " due to " + e.toString()); } catch (final Exception e) { throw new CloudRuntimeException("Unable to get pv iso: " + templateName + " due to " + e.toString()); } } if (!(store instanceof NfsTO)) { throw new CloudRuntimeException("only support mount iso on nfs"); } final NfsTO nfsStore = (NfsTO)store; final String isoPath = nfsStore.getUrl() + File.separator + iso.getPath(); final int index = isoPath.lastIndexOf("/"); final String mountpoint = isoPath.substring(0, index); URI uri; try { uri = new URI(mountpoint); } catch (final URISyntaxException e) { throw new CloudRuntimeException("Incorrect uri " + mountpoint, e); } final SR isoSr = createIsoSRbyURI(conn, uri, vmName, false); final String isoname = isoPath.substring(index + 1); final VDI isoVdi = getVDIbyLocationandSR(conn, isoname, isoSr); if (isoVdi == null) { throw new CloudRuntimeException("Unable to find ISO " + isoPath); } return isoVdi; } else { final VolumeObjectTO vol = (VolumeObjectTO)data; return VDI.getByUuid(conn, vol.getPath()); } } public String networkUsage(final Connection conn, final String privateIpAddress, final String option, final String vif) { if (option.equals("get")) { return "0:0"; } return null; } private List<Pair<String, Long>> ovsFullSyncStates() { final Connection conn = getConnection(); final String result = callHostPlugin(conn, "ovsgre", "ovs_get_vm_log", "host_uuid", _host.getUuid()); final String[] logs = result != null ? result.split(";") : new String[0]; final List<Pair<String, Long>> states = new ArrayList<Pair<String, Long>>(); for (final String log : logs) { final String[] info = log.split(","); if (info.length != 5) { s_logger.warn("Wrong element number in ovs log(" + log + ")"); continue; } // ','.join([bridge, vmName, vmId, seqno, tag]) try { states.add(new Pair<String, Long>(info[0], Long.parseLong(info[3]))); } catch (final NumberFormatException nfe) { states.add(new Pair<String, Long>(info[0], -1L)); } } return states; } public HashMap<String, String> parseDefaultOvsRuleComamnd(final String str) { final HashMap<String, String> cmd = new HashMap<String, String>(); final String[] sarr = str.split("/"); for (int i = 0; i < sarr.length; i++) { String c = sarr[i]; c = c.startsWith("/") ? c.substring(1) : c; c = c.endsWith("/") ? c.substring(0, c.length() - 1) : c; final String[] p = c.split(";"); if (p.length != 2) { continue; } if (p[0].equalsIgnoreCase("vlans")) { p[1] = p[1].replace("@", "["); p[1] = p[1].replace("#", "]"); } cmd.put(p[0], p[1]); } return cmd; } protected Pair<Long, Integer> parseTimestamp(final String timeStampStr) { final String[] tokens = timeStampStr.split("-"); if (tokens.length != 3) { s_logger.debug("timeStamp in network has wrong pattern: " + timeStampStr); return null; } if (!tokens[0].equals("CsCreateTime")) { s_logger.debug("timeStamp in network doesn't start with CsCreateTime: " + timeStampStr); return null; } return new Pair<Long, Integer>(Long.parseLong(tokens[1]), Integer.parseInt(tokens[2])); } private void pbdPlug(final Connection conn, final PBD pbd, final String uuid) { try { if (s_logger.isDebugEnabled()) { s_logger.debug("Plugging in PBD " + uuid + " for " + _host); } pbd.plug(conn); } catch (final Exception e) { final String msg = "PBD " + uuid + " is not attached! 
and PBD plug failed due to " + e.toString() + ". Please check this PBD in " + _host; s_logger.warn(msg, e); throw new CloudRuntimeException(msg); } } protected boolean pingdomr(final Connection conn, final String host, final String port) { String status; status = callHostPlugin(conn, "vmops", "pingdomr", "host", host, "port", port); if (status == null || status.isEmpty()) { return false; } return true; } public boolean pingXAPI() { final Connection conn = getConnection(); try { final Host host = Host.getByUuid(conn, _host.getUuid()); if (!host.getEnabled(conn)) { s_logger.debug("Host " + _host.getIp() + " is not enabled!"); return false; } } catch (final Exception e) { s_logger.debug("cannot get host enabled status, host " + _host.getIp() + " due to " + e.toString(), e); return false; } try { callHostPlugin(conn, "echo", "main"); } catch (final Exception e) { s_logger.debug("cannot ping host " + _host.getIp() + " due to " + e.toString(), e); return false; } return true; } protected void plugDom0Vif(final Connection conn, final VIF dom0Vif) throws XmlRpcException, XenAPIException { if (dom0Vif != null) { dom0Vif.plug(conn); } } protected boolean postCreatePrivateTemplate(final Connection conn, final String templatePath, final String tmpltFilename, final String templateName, String templateDescription, String checksum, final long size, final long virtualSize, final long templateId) { if (templateDescription == null) { templateDescription = ""; } if (checksum == null) { checksum = ""; } final String result = callHostPlugin(conn, "vmopsSnapshot", "post_create_private_template", "templatePath", templatePath, "templateFilename", tmpltFilename, "templateName", templateName, "templateDescription", templateDescription, "checksum", checksum, "size", String.valueOf(size), "virtualSize", String.valueOf(virtualSize), "templateId", String.valueOf(templateId)); boolean success = false; if (result != null && !result.isEmpty()) { // Else, command threw an exception which has already been logged. if (result.equalsIgnoreCase("1")) { s_logger.debug("Successfully created template.properties file on secondary storage for " + tmpltFilename); success = true; } else { s_logger.warn("Could not create template.properties file on secondary storage for " + tmpltFilename + " for templateId: " + templateId); } } return success; } @Override public ExecutionResult prepareCommand(final NetworkElementCommand cmd) { // Update IP used to access router cmd.setRouterAccessIp(cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP)); assert cmd.getRouterAccessIp() != null; if (cmd instanceof IpAssocVpcCommand) { return prepareNetworkElementCommand((IpAssocVpcCommand)cmd); } else if (cmd instanceof IpAssocCommand) { return prepareNetworkElementCommand((IpAssocCommand)cmd); } else if (cmd instanceof SetupGuestNetworkCommand) { return prepareNetworkElementCommand((SetupGuestNetworkCommand)cmd); } else if (cmd instanceof SetSourceNatCommand) { return prepareNetworkElementCommand((SetSourceNatCommand)cmd); } else if (cmd instanceof SetNetworkACLCommand) { return prepareNetworkElementCommand((SetNetworkACLCommand)cmd); } return new ExecutionResult(true, null); } public void prepareISO(final Connection conn, final String vmName, List<String[]> vmDataList, String configDriveLabel) throws XmlRpcException, XenAPIException { final Set<VM> vms = VM.getByNameLabel(conn, vmName); if (vms == null || vms.size() != 1) { throw new CloudRuntimeException("There are " + (vms == null ? 
"0" : vms.size()) + " VMs named " + vmName); } final VM vm = vms.iterator().next(); if (vmDataList != null) { // create SR SR sr = createLocalIsoSR(conn, _configDriveSRName + getHost().getIp()); // 1. create vm data files createVmdataFiles(vmName, vmDataList, configDriveLabel); // 2. copy config drive iso to host copyConfigDriveIsoToHost(conn, sr, vmName); } final Set<VBD> vbds = vm.getVBDs(conn); for (final VBD vbd : vbds) { final VBD.Record vbdr = vbd.getRecord(conn); if (vbdr.type == Types.VbdType.CD && vbdr.empty == false && vbdr.userdevice.equals(_attachIsoDeviceNum)) { final VDI vdi = vbdr.VDI; final SR sr = vdi.getSR(conn); final Set<PBD> pbds = sr.getPBDs(conn); if (pbds == null) { throw new CloudRuntimeException("There is no pbd for sr " + sr); } for (final PBD pbd : pbds) { final PBD.Record pbdr = pbd.getRecord(conn); if (pbdr.host.getUuid(conn).equals(_host.getUuid())) { return; } } sr.setShared(conn, true); final Host host = Host.getByUuid(conn, _host.getUuid()); final PBD.Record pbdr = pbds.iterator().next().getRecord(conn); pbdr.host = host; pbdr.uuid = ""; final PBD pbd = PBD.create(conn, pbdr); pbdPlug(conn, pbd, pbd.getUuid(conn)); break; } } } // The idea here is to see if the DiskTO in question is from managed storage and does not yet have an SR. // If no SR, create it and create a VDI in it. public VDI prepareManagedDisk(final Connection conn, final DiskTO disk, final long vmId, final String vmName) throws Exception { final Map<String, String> details = disk.getDetails(); if (details == null) { return null; } final boolean isManaged = new Boolean(details.get(DiskTO.MANAGED)).booleanValue(); if (!isManaged) { return null; } final String iqn = details.get(DiskTO.IQN); final Set<SR> srNameLabels = SR.getByNameLabel(conn, iqn); if (srNameLabels.size() != 0) { return null; } final String vdiNameLabel = Volume.Type.ROOT.equals(disk.getType()) ? ("ROOT-" + vmId) : (vmName + "-DATA"); return prepareManagedStorage(conn, details, null, vdiNameLabel); } protected SR prepareManagedSr(final Connection conn, final Map<String, String> details) { final String iScsiName = details.get(DiskTO.IQN); final String storageHost = details.get(DiskTO.STORAGE_HOST); final String chapInitiatorUsername = details.get(DiskTO.CHAP_INITIATOR_USERNAME); final String chapInitiatorSecret = details.get(DiskTO.CHAP_INITIATOR_SECRET); final String mountpoint = details.get(DiskTO.MOUNT_POINT); final String protocoltype = details.get(DiskTO.PROTOCOL_TYPE); if (StoragePoolType.NetworkFilesystem.toString().equalsIgnoreCase(protocoltype)) { final String poolid = storageHost + ":" + mountpoint; final String namelable = mountpoint; final String volumedesc = storageHost + ":" + mountpoint; return getNfsSR(conn, poolid, namelable, storageHost, mountpoint, volumedesc); } else { return getIscsiSR(conn, iScsiName, storageHost, iScsiName, chapInitiatorUsername, chapInitiatorSecret, false, SRType.LVMOISCSI.toString(), true); } } protected VDI prepareManagedStorage(final Connection conn, final Map<String, String> details, final String path, final String vdiNameLabel) throws Exception { final SR sr = prepareManagedSr(conn, details); VDI vdi = getVDIbyUuid(conn, path, false); final Long volumeSize = Long.parseLong(details.get(DiskTO.VOLUME_SIZE)); Set<VDI> vdisInSr = sr.getVDIs(conn); // If a VDI already exists in the SR (in case we cloned from a template cache), use that. 
if (vdisInSr.size() == 1) { vdi = vdisInSr.iterator().next(); } if (vdi == null) { vdi = createVdi(sr, vdiNameLabel, volumeSize); } else { // If vdi is not null, it must have already been created, so check whether a resize of the volume was performed. // If true, resize the VDI to the volume size. s_logger.info("Checking for the resize of the datadisk"); final long vdiVirtualSize = vdi.getVirtualSize(conn); if (vdiVirtualSize != volumeSize) { s_logger.info("Resizing the data disk (VDI) from vdiVirtualSize: " + vdiVirtualSize + " to volumeSize: " + volumeSize); try { vdi.resize(conn, volumeSize); } catch (final Exception e) { s_logger.warn("Unable to resize volume", e); } } // change the name-label in case of a cloned VDI if (!Objects.equals(vdi.getNameLabel(conn), vdiNameLabel)) { try { vdi.setNameLabel(conn, vdiNameLabel); } catch (final Exception e) { s_logger.warn("Unable to rename volume", e); } } } return vdi; } protected ExecutionResult prepareNetworkElementCommand(final IpAssocCommand cmd) { final Connection conn = getConnection(); final String routerName = cmd.getAccessDetail(NetworkElementCommand.ROUTER_NAME); final String routerIp = cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP); try { final IpAddressTO[] ips = cmd.getIpAddresses(); for (final IpAddressTO ip : ips) { final VM router = getVM(conn, routerName); final NicTO nic = new NicTO(); nic.setMac(ip.getVifMacAddress()); nic.setType(ip.getTrafficType()); if (ip.getBroadcastUri() == null) { nic.setBroadcastType(BroadcastDomainType.Native); } else { final URI uri = BroadcastDomainType.fromString(ip.getBroadcastUri()); nic.setBroadcastType(BroadcastDomainType.getSchemeValue(uri)); nic.setBroadcastUri(uri); } nic.setDeviceId(0); nic.setNetworkRateMbps(ip.getNetworkRate()); nic.setName(ip.getNetworkName()); final Network network = getNetwork(conn, nic); // Determine the correct VIF on DomR to associate/disassociate // the // IP address with VIF correctVif = getCorrectVif(conn, router, network); // If we are associating an IP address and DomR doesn't have a // VIF // for the specified vlan ID, we need to add a VIF // If we are disassociating the last IP address in the VLAN, we // need // to remove a VIF boolean addVif = false; if (ip.isAdd() && correctVif == null) { addVif = true; } if (addVif) { // Add a new VIF to DomR final String vifDeviceNum = getLowestAvailableVIFDeviceNum(conn, router); if (vifDeviceNum == null) { throw new InternalErrorException("There were no more available slots for a new VIF on router: " + router.getNameLabel(conn)); } nic.setDeviceId(Integer.parseInt(vifDeviceNum)); correctVif = createVif(conn, routerName, router, null, nic); correctVif.plug(conn); // Add iptables rule for network usage networkUsage(conn, routerIp, "addVif", "eth" + correctVif.getDevice(conn)); } if (ip.isAdd() && correctVif == null) { throw new InternalErrorException("Failed to find DomR VIF to associate/disassociate IP with."); } if (correctVif != null) { ip.setNicDevId(Integer.valueOf(correctVif.getDevice(conn))); ip.setNewNic(addVif); } } } catch (final InternalErrorException e) { s_logger.error("Ip Assoc failure on applying one ip due to exception: ", e); return new ExecutionResult(false, e.getMessage()); } catch (final Exception e) { return new ExecutionResult(false, e.getMessage()); } return new ExecutionResult(true, null); } protected ExecutionResult prepareNetworkElementCommand(final IpAssocVpcCommand cmd) { final Connection conn = getConnection(); final String routerName = 
cmd.getAccessDetail(NetworkElementCommand.ROUTER_NAME); try { final IpAddressTO[] ips = cmd.getIpAddresses(); for (final IpAddressTO ip : ips) { final VM router = getVM(conn, routerName); final VIF correctVif = getVifByMac(conn, router, ip.getVifMacAddress()); setNicDevIdIfCorrectVifIsNotNull(conn, ip, correctVif); } } catch (final Exception e) { s_logger.error("Ip Assoc failure on applying one ip due to exception: ", e); return new ExecutionResult(false, e.getMessage()); } return new ExecutionResult(true, null); } protected ExecutionResult prepareNetworkElementCommand(final SetNetworkACLCommand cmd) { final Connection conn = getConnection(); final String routerName = cmd.getAccessDetail(NetworkElementCommand.ROUTER_NAME); try { final VM router = getVM(conn, routerName); final NicTO nic = cmd.getNic(); if (nic != null) { final VIF vif = getVifByMac(conn, router, nic.getMac()); if (vif == null) { final String msg = "Prepare SetNetworkACL failed due to VIF is null for : " + nic.getMac() + " with routername: " + routerName; s_logger.error(msg); return new ExecutionResult(false, msg); } nic.setDeviceId(Integer.parseInt(vif.getDevice(conn))); } else { final String msg = "Prepare SetNetworkACL failed due to nic is null for : " + routerName; s_logger.error(msg); return new ExecutionResult(false, msg); } } catch (final Exception e) { final String msg = "Prepare SetNetworkACL failed due to " + e.toString(); s_logger.error(msg, e); return new ExecutionResult(false, msg); } return new ExecutionResult(true, null); } protected ExecutionResult prepareNetworkElementCommand(final SetSourceNatCommand cmd) { final Connection conn = getConnection(); final String routerName = cmd.getAccessDetail(NetworkElementCommand.ROUTER_NAME); final IpAddressTO pubIp = cmd.getIpAddress(); try { final VM router = getVM(conn, routerName); final VIF correctVif = getCorrectVif(conn, router, pubIp); pubIp.setNicDevId(Integer.valueOf(correctVif.getDevice(conn))); } catch (final Exception e) { final String msg = "Ip SNAT failure due to " + e.toString(); s_logger.error(msg, e); return new ExecutionResult(false, msg); } return new ExecutionResult(true, null); } /** * @param cmd * @return */ private ExecutionResult prepareNetworkElementCommand(final SetupGuestNetworkCommand cmd) { final Connection conn = getConnection(); final NicTO nic = cmd.getNic(); final String domrName = cmd.getAccessDetail(NetworkElementCommand.ROUTER_NAME); try { final Set<VM> vms = VM.getByNameLabel(conn, domrName); if (vms == null || vms.isEmpty()) { return new ExecutionResult(false, "Can not find VM " + domrName); } final VM vm = vms.iterator().next(); final String mac = nic.getMac(); VIF domrVif = null; for (final VIF vif : vm.getVIFs(conn)) { final String lmac = vif.getMAC(conn); if (lmac.equals(mac)) { domrVif = vif; // Do not break it! We have 2 routers. 
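// Because this loop is intentionally not broken, the last VIF whose MAC matches is the one that
// ends up being used if more than one VIF carries the same MAC address.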
// break; } } if (domrVif == null) { return new ExecutionResult(false, "Can not find vif with mac " + mac + " for VM " + domrName); } nic.setDeviceId(Integer.parseInt(domrVif.getDevice(conn))); } catch (final Exception e) { final String msg = "Creating guest network failed due to " + e.toString(); s_logger.warn(msg, e); return new ExecutionResult(false, msg); } return new ExecutionResult(true, null); } public void rebootVM(final Connection conn, final VM vm, final String vmName) throws Exception { Task task = null; try { task = vm.cleanRebootAsync(conn); try { // poll every 1 seconds , timeout after 10 minutes waitForTask(conn, task, 1000, 10 * 60 * 1000); checkForSuccess(conn, task); } catch (final Types.HandleInvalid e) { if (vm.getPowerState(conn) == VmPowerState.RUNNING) { task = null; return; } throw new CloudRuntimeException("Reboot VM catch HandleInvalid and VM is not in RUNNING state"); } } catch (final XenAPIException e) { s_logger.debug("Unable to Clean Reboot VM(" + vmName + ") on host(" + _host.getUuid() + ") due to " + e.toString() + ", try hard reboot"); try { vm.hardReboot(conn); } catch (final Exception e1) { final String msg = "Unable to hard Reboot VM(" + vmName + ") on host(" + _host.getUuid() + ") due to " + e.toString(); s_logger.warn(msg, e1); throw new CloudRuntimeException(msg); } } finally { if (task != null) { try { task.destroy(conn); } catch (final Exception e1) { s_logger.debug("unable to destroy task(" + task.toString() + ") on host(" + _host.getUuid() + ") due to " + e1.toString()); } } } } protected void skipOrRemoveSR(Connection conn, SR sr) { if (sr == null) { return; } if (s_logger.isDebugEnabled()) { s_logger.debug(logX(sr, "Removing SR")); } try { Set<VDI> vdis = sr.getVDIs(conn); for (VDI vdi : vdis) { if (MapUtils.isEmpty(vdi.getCurrentOperations(conn))) { continue; } return; } removeSR(conn, sr); return; } catch (XenAPIException | XmlRpcException e) { s_logger.warn(logX(sr, "Unable to get current opertions " + e.toString()), e); } String msg = "Remove SR failed"; s_logger.warn(msg); } public void removeSR(final Connection conn, final SR sr) { if (sr == null) { return; } if (s_logger.isDebugEnabled()) { s_logger.debug(logX(sr, "Removing SR")); } for (int i = 0; i < 2; i++) { try { final Set<VDI> vdis = sr.getVDIs(conn); for (final VDI vdi : vdis) { vdi.forget(conn); } Set<PBD> pbds = sr.getPBDs(conn); for (final PBD pbd : pbds) { if (s_logger.isDebugEnabled()) { s_logger.debug(logX(pbd, "Unplugging pbd")); } // if (pbd.getCurrentlyAttached(conn)) { pbd.unplug(conn); // } pbd.destroy(conn); } pbds = sr.getPBDs(conn); if (pbds.size() == 0) { if (s_logger.isDebugEnabled()) { s_logger.debug(logX(sr, "Forgetting")); } sr.forget(conn); return; } if (s_logger.isDebugEnabled()) { s_logger.debug(logX(sr, "There is still one or more PBDs attached.")); if (s_logger.isTraceEnabled()) { for (final PBD pbd : pbds) { s_logger.trace(logX(pbd, " Still attached")); } } } } catch (final XenAPIException e) { s_logger.debug(logX(sr, "Catch XenAPIException: " + e.toString())); } catch (final XmlRpcException e) { s_logger.debug(logX(sr, "Catch Exception: " + e.getMessage())); } } s_logger.warn(logX(sr, "Unable to remove SR")); } protected String removeSRSync(final Connection conn, final SR sr) { if (sr == null) { return null; } if (s_logger.isDebugEnabled()) { s_logger.debug(logX(sr, "Removing SR")); } long waittime = 0; try { final Set<VDI> vdis = sr.getVDIs(conn); for (final VDI vdi : vdis) { final Map<java.lang.String, Types.VdiOperations> currentOperation = 
vdi.getCurrentOperations(conn); if (currentOperation == null || currentOperation.size() == 0) { continue; } if (waittime >= 1800000) { final String msg = "This template is being used, try late time"; s_logger.warn(msg); return msg; } waittime += 30000; try { Thread.sleep(30000); } catch (final InterruptedException ex) { } } removeSR(conn, sr); return null; } catch (final XenAPIException e) { s_logger.warn(logX(sr, "Unable to get current opertions " + e.toString()), e); } catch (final XmlRpcException e) { s_logger.warn(logX(sr, "Unable to get current opertions " + e.getMessage()), e); } final String msg = "Remove SR failed"; s_logger.warn(msg); return msg; } public String revertToSnapshot(final Connection conn, final VM vmSnapshot, final String vmName, final String oldVmUuid, final Boolean snapshotMemory, final String hostUUID) throws XenAPIException, XmlRpcException { final String results = callHostPluginAsync(conn, "vmopsSnapshot", "revert_memory_snapshot", 10 * 60 * 1000, "snapshotUUID", vmSnapshot.getUuid(conn), "vmName", vmName, "oldVmUuid", oldVmUuid, "snapshotMemory", snapshotMemory.toString(), "hostUUID", hostUUID); String errMsg = null; if (results == null || results.isEmpty()) { errMsg = "revert_memory_snapshot return null"; } else { if (results.equals("0")) { return results; } else { errMsg = "revert_memory_snapshot exception"; } } s_logger.warn(errMsg); throw new CloudRuntimeException(errMsg); } public void scaleVM(final Connection conn, final VM vm, final VirtualMachineTO vmSpec, final Host host) throws XenAPIException, XmlRpcException { final Long staticMemoryMax = vm.getMemoryStaticMax(conn); final Long staticMemoryMin = vm.getMemoryStaticMin(conn); final Long newDynamicMemoryMin = vmSpec.getMinRam(); final Long newDynamicMemoryMax = vmSpec.getMaxRam(); if (staticMemoryMin > newDynamicMemoryMin || newDynamicMemoryMax > staticMemoryMax) { throw new CloudRuntimeException("Cannot scale up the vm because of memory constraint violation: " + "0 <= memory-static-min(" + staticMemoryMin + ") <= memory-dynamic-min(" + newDynamicMemoryMin + ") <= memory-dynamic-max(" + newDynamicMemoryMax + ") <= memory-static-max(" + staticMemoryMax + ")"); } vm.setMemoryDynamicRange(conn, newDynamicMemoryMin, newDynamicMemoryMax); vm.setVCPUsNumberLive(conn, (long)vmSpec.getCpus()); final Integer speed = vmSpec.getMinSpeed(); if (speed != null) { int cpuWeight = _maxWeight; // cpu_weight // weight based allocation cpuWeight = (int)(speed * 0.99 / _host.getSpeed() * _maxWeight); if (cpuWeight > _maxWeight) { cpuWeight = _maxWeight; } if (vmSpec.getLimitCpuUse()) { long utilization = 0; // max CPU cap, default is unlimited utilization = (int)(vmSpec.getMaxSpeed() * 0.99 * vmSpec.getCpus() / _host.getSpeed() * 100); // vm.addToVCPUsParamsLive(conn, "cap", // Long.toString(utilization)); currently xenserver doesnot // support Xapi to add VCPUs params live. 
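// Illustrative arithmetic for the cap handed to the "vmops" plugin below, using assumed values
// (host speed of 2000 MHz): a maxSpeed of 1000 MHz with 2 vCPUs gives
// 1000 * 0.99 * 2 / 2000 * 100 = 99, which is passed as the "cap" VCPUs-param. The weight computed
// earlier follows the same pattern: with a minSpeed of 500 MHz and a _maxWeight of 256 (the actual
// value is configuration dependent) it comes to 500 * 0.99 / 2000 * 256 = 63 (truncated to int).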
callHostPlugin(conn, "vmops", "add_to_VCPUs_params_live", "key", "cap", "value", Long.toString(utilization), "vmname", vmSpec.getName()); } // vm.addToVCPUsParamsLive(conn, "weight", // Integer.toString(cpuWeight)); callHostPlugin(conn, "vmops", "add_to_VCPUs_params_live", "key", "weight", "value", Integer.toString(cpuWeight), "vmname", vmSpec.getName()); } } @Override public void setAgentControl(final IAgentControl agentControl) { _agentControl = agentControl; } public void setCanBridgeFirewall(final boolean canBridgeFirewall) { _canBridgeFirewall = canBridgeFirewall; } @Override public void setConfigParams(final Map<String, Object> params) { } public boolean setIptables(final Connection conn) { final String result = callHostPlugin(conn, "vmops", "setIptables"); if (result == null || result.isEmpty()) { return false; } return true; } public void setIsOvs(final boolean isOvs) { _isOvs = isOvs; } /** * WARN: static-min <= dynamic-min <= dynamic-max <= static-max * * @see XcpServerResource#setMemory(com.xensource.xenapi.Connection, * com.xensource.xenapi.VM, long, long) * @param conn * @param vm * @param minMemsize * @param maxMemsize * @throws XmlRpcException * @throws XenAPIException */ protected void setMemory(final Connection conn, final VM vm, final long minMemsize, final long maxMemsize) throws XmlRpcException, XenAPIException { vm.setMemoryLimits(conn, mem_128m, maxMemsize, minMemsize, maxMemsize); } @Override public void setName(final String name) { } protected void setNicDevIdIfCorrectVifIsNotNull(final Connection conn, final IpAddressTO ip, final VIF correctVif) throws InternalErrorException, BadServerResponse, XenAPIException, XmlRpcException { if (correctVif == null) { if (ip.isAdd()) { throw new InternalErrorException("Failed to find DomR VIF to associate IP with."); } else { s_logger.debug("VIF to deassociate IP with does not exist, return success"); } } else { ip.setNicDevId(Integer.valueOf(correctVif.getDevice(conn))); } } @Override public void setRunLevel(final int level) { } public String setupHeartbeatSr(final Connection conn, final SR sr, final boolean force) throws XenAPIException, XmlRpcException { final SR.Record srRec = sr.getRecord(conn); final String srUuid = srRec.uuid; if (!srRec.shared || !SRType.LVMOHBA.equals(srRec.type) && !SRType.LVMOISCSI.equals(srRec.type) && !SRType.NFS.equals(srRec.type)) { return srUuid; } String result = null; final Host host = Host.getByUuid(conn, _host.getUuid()); final Set<String> tags = host.getTags(conn); if (force || !tags.contains("cloud-heartbeat-" + srUuid)) { if (s_logger.isDebugEnabled()) { s_logger.debug("Setting up the heartbeat sr for host " + _host.getIp() + " and sr " + srUuid); } final Set<PBD> pbds = sr.getPBDs(conn); for (final PBD pbd : pbds) { final PBD.Record pbdr = pbd.getRecord(conn); if (!pbdr.currentlyAttached && pbdr.host.getUuid(conn).equals(_host.getUuid())) { pbd.plug(conn); break; } } result = callHostPluginThroughMaster(conn, "vmopspremium", "setup_heartbeat_sr", "host", _host.getUuid(), "sr", srUuid); if (result == null || !result.split("#")[1].equals("0")) { throw new CloudRuntimeException("Unable to setup heartbeat sr on SR " + srUuid + " due to " + result); } if (!tags.contains("cloud-heartbeat-" + srUuid)) { tags.add("cloud-heartbeat-" + srUuid); host.setTags(conn, tags); } } result = callHostPluginPremium(conn, "setup_heartbeat_file", "host", _host.getUuid(), "sr", srUuid, "add", "true"); if (result == null || !result.split("#")[1].equals("0")) { throw new CloudRuntimeException("Unable to setup 
heartbeat file entry on SR " + srUuid + " due to " + result); } return srUuid; } public void setupLinkLocalNetwork(final Connection conn) { try { final Network.Record rec = new Network.Record(); final Set<Network> networks = Network.getByNameLabel(conn, _linkLocalPrivateNetworkName); Network linkLocal = null; if (networks.size() == 0) { rec.nameDescription = "link local network used by system vms"; rec.nameLabel = _linkLocalPrivateNetworkName; final Map<String, String> configs = new HashMap<String, String>(); configs.put("ip_begin", NetUtils.getLinkLocalGateway()); configs.put("ip_end", NetUtils.getLinkLocalIpEnd()); configs.put("netmask", NetUtils.getLinkLocalNetMask()); configs.put("vswitch-disable-in-band", "true"); rec.otherConfig = configs; linkLocal = Network.create(conn, rec); } else { linkLocal = networks.iterator().next(); if (!linkLocal.getOtherConfig(conn).containsKey("vswitch-disable-in-band")) { linkLocal.addToOtherConfig(conn, "vswitch-disable-in-band", "true"); } } /* Make sure there is a physical bridge on this network */ VIF dom0vif = null; final Pair<VM, VM.Record> vm = getControlDomain(conn); final VM dom0 = vm.first(); final Set<VIF> vifs = dom0.getVIFs(conn); if (vifs.size() != 0) { for (final VIF vif : vifs) { final Map<String, String> otherConfig = vif.getOtherConfig(conn); if (otherConfig != null) { final String nameLabel = otherConfig.get("nameLabel"); if (nameLabel != null && nameLabel.equalsIgnoreCase("link_local_network_vif")) { dom0vif = vif; } } } } /* create temp VIF0 */ if (dom0vif == null) { s_logger.debug("Can't find a vif on dom0 for link local, creating a new one"); final VIF.Record vifr = new VIF.Record(); vifr.VM = dom0; vifr.device = getLowestAvailableVIFDeviceNum(conn, dom0); if (vifr.device == null) { s_logger.debug("Failed to create link local network, no vif available"); return; } final Map<String, String> config = new HashMap<String, String>(); config.put("nameLabel", "link_local_network_vif"); vifr.otherConfig = config; vifr.MAC = "FE:FF:FF:FF:FF:FF"; vifr.network = linkLocal; vifr.lockingMode = Types.VifLockingMode.NETWORK_DEFAULT; dom0vif = VIF.create(conn, vifr); plugDom0Vif(conn, dom0vif); } else { s_logger.debug("already have a vif on dom0 for link local network"); if (!dom0vif.getCurrentlyAttached(conn)) { plugDom0Vif(conn, dom0vif); } } final String brName = linkLocal.getBridge(conn); callHostPlugin(conn, "vmops", "setLinkLocalIP", "brName", brName); _host.setLinkLocalNetwork(linkLocal.getUuid(conn)); } catch (final XenAPIException e) { s_logger.warn("Unable to create local link network", e); throw new CloudRuntimeException("Unable to create local link network due to " + e.toString(), e); } catch (final XmlRpcException e) { s_logger.warn("Unable to create local link network", e); throw new CloudRuntimeException("Unable to create local link network due to " + e.toString(), e); } } /* return : if setup is needed */ public boolean setupServer(final Connection conn, final Host host) { final String packageVersion = CitrixResourceBase.class.getPackage().getImplementationVersion(); final String version = this.getClass().getName() + "-" + (packageVersion == null ? 
Long.toString(System.currentTimeMillis()) : packageVersion); try { /* push patches to XenServer */ final Host.Record hr = host.getRecord(conn); final Iterator<String> it = hr.tags.iterator(); while (it.hasNext()) { final String tag = it.next(); if (tag.startsWith("vmops-version-")) { if (tag.contains(version)) { s_logger.info(logX(host, "Host " + hr.address + " is already setup.")); return false; } else { it.remove(); } } } final com.trilead.ssh2.Connection sshConnection = new com.trilead.ssh2.Connection(hr.address, 22); try { sshConnection.connect(null, 60000, 60000); if (!sshConnection.authenticateWithPassword(_username, _password.peek())) { throw new CloudRuntimeException("Unable to authenticate"); } final String cmd = "mkdir -p /opt/cloud/bin /var/log/cloud"; if (!SSHCmdHelper.sshExecuteCmd(sshConnection, cmd)) { throw new CloudRuntimeException("Cannot create directory /opt/cloud/bin on XenServer hosts"); } final SCPClient scp = new SCPClient(sshConnection); final List<File> files = getPatchFiles(); if (files == null || files.isEmpty()) { throw new CloudRuntimeException("Can not find patch file"); } for (final File file : files) { final String path = file.getParentFile().getAbsolutePath() + "/"; final Properties props = PropertiesUtil.loadFromFile(file); for (final Map.Entry<Object, Object> entry : props.entrySet()) { final String k = (String)entry.getKey(); final String v = (String)entry.getValue(); assert k != null && k.length() > 0 && v != null && v.length() > 0 : "Problems with " + k + "=" + v; final String[] tokens = v.split(","); String f = null; if (tokens.length == 3 && tokens[0].length() > 0) { if (tokens[0].startsWith("/")) { f = tokens[0]; } else if (tokens[0].startsWith("~")) { final String homedir = System.getenv("HOME"); f = homedir + tokens[0].substring(1) + k; } else { f = path + tokens[0] + '/' + k; } } else { f = path + k; } final String directoryPath = tokens[tokens.length - 1]; f = f.replace('/', File.separatorChar); String permissions = "0755"; if (tokens.length == 3) { permissions = tokens[1]; } else if (tokens.length == 2) { permissions = tokens[0]; } if (!new File(f).exists()) { s_logger.warn("We cannot locate " + f); continue; } if (s_logger.isDebugEnabled()) { s_logger.debug("Copying " + f + " to " + directoryPath + " on " + hr.address + " with permission " + permissions); } if (!SSHCmdHelper.sshExecuteCmd(sshConnection, "mkdir -m 700 -p " + directoryPath)) { s_logger.debug("Unable to create destination path: " + directoryPath + " on " + hr.address + "."); } try { scp.put(f, directoryPath, permissions); } catch (final IOException e) { final String msg = "Unable to copy file " + f + " to path " + directoryPath + " with permissions " + permissions; s_logger.debug(msg); throw new CloudRuntimeException("Unable to setup the server: " + msg, e); } } } } catch (final IOException e) { throw new CloudRuntimeException("Unable to setup the server correctly", e); } finally { sshConnection.close(); } hr.tags.add("vmops-version-" + version); host.setTags(conn, hr.tags); return true; } catch (final XenAPIException e) { final String msg = "XenServer setup failed due to " + e.toString(); s_logger.warn(msg, e); throw new CloudRuntimeException("Unable to get host information " + e.toString(), e); } catch (final XmlRpcException e) { final String msg = "XenServer setup failed due to " + e.getMessage(); s_logger.warn(msg, e); throw new CloudRuntimeException("Unable to get host information ", e); } } public synchronized Network setupvSwitchNetwork(final Connection conn) { try { if 
(_host.getVswitchNetwork() == null) { Network vswitchNw = null; final Network.Record rec = new Network.Record(); final String nwName = Networks.BroadcastScheme.VSwitch.toString(); final Set<Network> networks = Network.getByNameLabel(conn, nwName); if (networks.size() == 0) { rec.nameDescription = "vswitch network for " + nwName; rec.nameLabel = nwName; vswitchNw = Network.create(conn, rec); } else { vswitchNw = networks.iterator().next(); } _host.setVswitchNetwork(vswitchNw); } return _host.getVswitchNetwork(); } catch (final BadServerResponse e) { s_logger.error("Failed to setup vswitch network", e); } catch (final XenAPIException e) { s_logger.error("Failed to setup vswitch network", e); } catch (final XmlRpcException e) { s_logger.error("Failed to setup vswitch network", e); } return null; } public void shutdownVM(final Connection conn, final VM vm, final String vmName, final boolean forcedStop) throws XmlRpcException { Task task = null; try { if (forcedStop) { task = vm.hardShutdownAsync(conn); } else { task = vm.cleanShutdownAsync(conn); } try { // poll every 1 seconds , timeout after 10 minutes waitForTask(conn, task, 1000, 10 * 60 * 1000); checkForSuccess(conn, task); } catch (final TimeoutException e) { if (vm.getPowerState(conn) == VmPowerState.HALTED) { task = null; return; } throw new CloudRuntimeException("Shutdown VM catch HandleInvalid and VM is not in HALTED state"); } } catch (final XenAPIException e) { s_logger.debug("Unable to shutdown VM(" + vmName + ") with force=" + forcedStop + " on host(" + _host.getUuid() + ") due to " + e.toString()); try { VmPowerState state = vm.getPowerState(conn); if (state == VmPowerState.RUNNING) { try { vm.hardShutdown(conn); } catch (final Exception e1) { s_logger.debug("Unable to hardShutdown VM(" + vmName + ") on host(" + _host.getUuid() + ") due to " + e.toString()); state = vm.getPowerState(conn); if (state == VmPowerState.RUNNING) { forceShutdownVM(conn, vm); } return; } } else if (state == VmPowerState.HALTED) { return; } else { final String msg = "After cleanShutdown the VM status is " + state.toString() + ", that is not expected"; s_logger.warn(msg); throw new CloudRuntimeException(msg); } } catch (final Exception e1) { final String msg = "Unable to hardShutdown VM(" + vmName + ") on host(" + _host.getUuid() + ") due to " + e.toString(); s_logger.warn(msg, e1); throw new CloudRuntimeException(msg); } } finally { if (task != null) { try { task.destroy(conn); } catch (final Exception e1) { s_logger.debug("unable to destroy task(" + task.toString() + ") on host(" + _host.getUuid() + ") due to " + e1.toString()); } } } } @Override public boolean start() { return true; } public void startVM(final Connection conn, final Host host, final VM vm, final String vmName) throws Exception { Task task = null; try { task = vm.startOnAsync(conn, host, false, true); try { // poll every 1 seconds , timeout after 10 minutes waitForTask(conn, task, 1000, 10 * 60 * 1000); checkForSuccess(conn, task); } catch (final Types.HandleInvalid e) { if (vm.getPowerState(conn) == VmPowerState.RUNNING) { s_logger.debug("VM " + vmName + " is in Running status", e); task = null; return; } throw new CloudRuntimeException("Start VM " + vmName + " catch HandleInvalid and VM is not in RUNNING state"); } catch (final TimeoutException e) { if (vm.getPowerState(conn) == VmPowerState.RUNNING) { s_logger.debug("VM " + vmName + " is in Running status", e); task = null; return; } throw new CloudRuntimeException("Start VM " + vmName + " catch BadAsyncResult and VM is not in 
RUNNING state"); } } catch (final XenAPIException e) { final String msg = "Unable to start VM(" + vmName + ") on host(" + _host.getUuid() + ") due to " + e.toString(); s_logger.warn(msg, e); throw new CloudRuntimeException(msg); } finally { if (task != null) { try { task.destroy(conn); } catch (final Exception e1) { s_logger.debug("unable to destroy task(" + task.toString() + ") on host(" + _host.getUuid() + ") due to " + e1.toString()); } } } } protected void startvmfailhandle(final Connection conn, final VM vm, final List<Ternary<SR, VDI, VolumeVO>> mounts) { if (vm != null) { try { if (vm.getPowerState(conn) == VmPowerState.RUNNING) { try { vm.hardShutdown(conn); } catch (final Exception e) { final String msg = "VM hardshutdown failed due to " + e.toString(); s_logger.warn(msg, e); } } if (vm.getPowerState(conn) == VmPowerState.HALTED) { try { vm.destroy(conn); } catch (final Exception e) { final String msg = "VM destroy failed due to " + e.toString(); s_logger.warn(msg, e); } } } catch (final Exception e) { final String msg = "VM getPowerState failed due to " + e.toString(); s_logger.warn(msg, e); } } if (mounts != null) { for (final Ternary<SR, VDI, VolumeVO> mount : mounts) { final VDI vdi = mount.second(); Set<VBD> vbds = null; try { vbds = vdi.getVBDs(conn); } catch (final Exception e) { final String msg = "VDI getVBDS failed due to " + e.toString(); s_logger.warn(msg, e); continue; } for (final VBD vbd : vbds) { try { vbd.unplug(conn); vbd.destroy(conn); } catch (final Exception e) { final String msg = "VBD destroy failed due to " + e.toString(); s_logger.warn(msg, e); } } } } } @Override public boolean stop() { disconnected(); return true; } private HashMap<String, Pair<Long, Long>> syncNetworkGroups(final Connection conn, final long id) { final HashMap<String, Pair<Long, Long>> states = new HashMap<String, Pair<Long, Long>>(); final String result = callHostPlugin(conn, "vmops", "get_rule_logs_for_vms", "host_uuid", _host.getUuid()); s_logger.trace("syncNetworkGroups: id=" + id + " got: " + result); final String[] rulelogs = result != null ? 
result.split(";") : new String[0]; for (final String rulesforvm : rulelogs) { final String[] log = rulesforvm.split(","); if (log.length != 6) { continue; } // output = ','.join([vmName, vmID, vmIP, domID, signature, seqno]) try { states.put(log[0], new Pair<Long, Long>(Long.parseLong(log[1]), Long.parseLong(log[5]))); } catch (final NumberFormatException nfe) { states.put(log[0], new Pair<Long, Long>(-1L, -1L)); } } return states; } public boolean transferManagementNetwork(final Connection conn, final Host host, final PIF src, final PIF.Record spr, final PIF dest) throws XmlRpcException, XenAPIException { dest.reconfigureIp(conn, spr.ipConfigurationMode, spr.IP, spr.netmask, spr.gateway, spr.DNS); Host.managementReconfigure(conn, dest); String hostUuid = null; int count = 0; while (count < 10) { try { Thread.sleep(10000); hostUuid = host.getUuid(conn); if (hostUuid != null) { break; } ++count; } catch (final XmlRpcException e) { s_logger.debug("Waiting for host to come back: " + e.getMessage()); } catch (final XenAPIException e) { s_logger.debug("Waiting for host to come back: " + e.getMessage()); } catch (final InterruptedException e) { s_logger.debug("Gotta run"); return false; } } if (hostUuid == null) { s_logger.warn("Unable to transfer the management network from " + spr.uuid); return false; } src.reconfigureIp(conn, Types.IpConfigurationMode.NONE, null, null, null, null); return true; } protected void umount(final Connection conn, final VDI vdi) { } public void umountSnapshotDir(final Connection conn, final Long dcId) { try { callHostPlugin(conn, "vmopsSnapshot", "unmountSnapshotsDir", "dcId", dcId.toString()); } catch (final Exception e) { s_logger.debug("Failed to umount snapshot dir", e); } } public String upgradeSnapshot(final Connection conn, final String templatePath, final String snapshotPath) { final String results = callHostPluginAsync(conn, "vmopspremium", "upgrade_snapshot", 2 * 60 * 60, "templatePath", templatePath, "snapshotPath", snapshotPath); if (results == null || results.isEmpty()) { final String msg = "upgrade_snapshot return null"; s_logger.warn(msg); throw new CloudRuntimeException(msg); } final String[] tmp = results.split("#"); final String status = tmp[0]; if (status.equals("0")) { return results; } else { s_logger.warn(results); throw new CloudRuntimeException(results); } } public void waitForTask(final Connection c, final Task task, final long pollInterval, final long timeout) throws XenAPIException, XmlRpcException, TimeoutException { final long beginTime = System.currentTimeMillis(); if (s_logger.isTraceEnabled()) { s_logger.trace("Task " + task.getNameLabel(c) + " (" + task.getUuid(c) + ") sent to " + c.getSessionReference() + " is pending completion with a " + timeout + "ms timeout"); } while (task.getStatus(c) == Types.TaskStatusType.PENDING) { try { if (s_logger.isTraceEnabled()) { s_logger.trace("Task " + task.getNameLabel(c) + " (" + task.getUuid(c) + ") is pending, sleeping for " + pollInterval + "ms"); } Thread.sleep(pollInterval); } catch (final InterruptedException e) { } if (System.currentTimeMillis() - beginTime > timeout) { final String msg = "Async " + timeout / 1000 + " seconds timeout for task " + task.toString(); s_logger.warn(msg); task.cancel(c); task.destroy(c); throw new TimeoutException(msg); } } } public boolean createAndAttachConfigDriveIsoForVM(final Connection conn, final VM vm, final List<String[]> vmDataList, final String configDriveLabel) throws XenAPIException, XmlRpcException { final String vmName = vm.getNameLabel(conn); // 
create SR final SR sr = createLocalIsoSR(conn, _configDriveSRName + _host.getIp()); if (sr == null) { s_logger.debug("Failed to create local SR for the config drive"); return false; } s_logger.debug("Creating vm data files in config drive for vm " + vmName); // 1. create vm data files if (!createVmdataFiles(vmName, vmDataList, configDriveLabel)) { s_logger.debug("Failed to create vm data files in config drive for vm " + vmName); return false; } // 2. copy config drive iso to host if (!copyConfigDriveIsoToHost(conn, sr, vmName)) { return false; } // 3. attachIsoToVM if (!attachConfigDriveIsoToVm(conn, vm)) { return false; } return true; } public boolean createVmdataFiles(final String vmName, final List<String[]> vmDataList, final String configDriveLabel) { // add vm iso to the isolibrary final String isoPath = "/tmp/" + vmName + "/configDrive/"; final String configDriveName = "cloudstack/"; //create folder for the VM //Remove the folder before creating it. try { deleteLocalFolder("/tmp/" + isoPath); } catch (final IOException e) { s_logger.debug("Failed to delete the exiting config drive for vm " + vmName + " " + e.getMessage()); } catch (final Exception e) { s_logger.debug("Failed to delete the exiting config drive for vm " + vmName + " " + e.getMessage()); } if (vmDataList != null) { for (final String[] item : vmDataList) { final String dataType = item[0]; final String fileName = item[1]; final String content = item[2]; // create file with content in folder if (dataType != null && !dataType.isEmpty()) { //create folder final String folder = isoPath + configDriveName + dataType; if (folder != null && !folder.isEmpty()) { final File dir = new File(folder); final boolean result = true; try { if (!dir.exists()) { dir.mkdirs(); } } catch (final SecurityException ex) { s_logger.debug("Failed to create dir " + ex.getMessage()); return false; } if (result && content != null && !content.isEmpty()) { File file = new File(folder + "/" + fileName + ".txt"); try (OutputStreamWriter fw = new OutputStreamWriter(new FileOutputStream(file.getAbsoluteFile()), "UTF-8"); BufferedWriter bw = new BufferedWriter(fw);) { bw.write(content); s_logger.debug("created file: " + file + " in folder:" + folder); } catch (final IOException ex) { s_logger.debug("Failed to create file " + ex.getMessage()); return false; } } } } } s_logger.debug("Created the vm data in " + isoPath); } String s = null; try { final String cmd = "mkisofs -iso-level 3 -V " + configDriveLabel + " -o " + isoPath + vmName + ".iso " + isoPath; final Process p = Runtime.getRuntime().exec(cmd); final BufferedReader stdInput = new BufferedReader(new InputStreamReader(p.getInputStream(), Charset.defaultCharset())); final BufferedReader stdError = new BufferedReader(new InputStreamReader(p.getErrorStream(), Charset.defaultCharset())); // read the output from the command while ((s = stdInput.readLine()) != null) { s_logger.debug(s); } // read any errors from the attempted command while ((s = stdError.readLine()) != null) { s_logger.debug(s); } s_logger.debug(" Created config drive ISO using the command " + cmd + " in the host " + _host.getIp()); } catch (final IOException e) { s_logger.debug(e.getMessage()); return false; } return true; } public boolean copyConfigDriveIsoToHost(final Connection conn, final SR sr, final String vmName) { final String vmIso = "/tmp/" + vmName + "/configDrive/" + vmName + ".iso"; //scp file into the host final com.trilead.ssh2.Connection sshConnection = new com.trilead.ssh2.Connection(_host.getIp(), 22); try { 
sshConnection.connect(null, 60000, 60000); if (!sshConnection.authenticateWithPassword(_username, _password.peek())) { throw new CloudRuntimeException("Unable to authenticate"); } s_logger.debug("scp config drive iso file " + vmIso + " to host " + _host.getIp() + " path " + _configDriveIsopath); final SCPClient scp = new SCPClient(sshConnection); final String p = "0755"; scp.put(vmIso, _configDriveIsopath, p); sr.scan(conn); s_logger.debug("copied config drive iso to host " + _host); } catch (final IOException e) { s_logger.debug("failed to copy configdrive iso " + vmIso + " to host " + _host, e); return false; } catch (final XmlRpcException e) { s_logger.debug("Failed to scan config drive iso SR " + _configDriveSRName + _host.getIp() + " in host " + _host, e); return false; } finally { sshConnection.close(); //clean up the config drive files final String configDir = "/tmp/" + vmName; try { deleteLocalFolder(configDir); s_logger.debug("Successfully cleaned up config drive directory " + configDir + " after copying it to host "); } catch (final Exception e) { s_logger.debug("Failed to delete config drive folder :" + configDir + " for VM " + vmName + " " + e.getMessage()); } } return true; } public boolean attachConfigDriveIsoToVm(final Connection conn, final VM vm) throws XenAPIException, XmlRpcException { final String vmName = vm.getNameLabel(conn); final String isoURL = _configDriveIsopath + vmName + ".iso"; VDI srVdi; //1. find the vdi of the iso //2. find the vbd for the vdi //3. attach iso to vm try { final Set<VDI> vdis = VDI.getByNameLabel(conn, vmName + ".iso"); if (vdis.isEmpty()) { throw new CloudRuntimeException("Could not find ISO with URL: " + isoURL); } srVdi = vdis.iterator().next(); } catch (final XenAPIException e) { s_logger.debug("Unable to get config drive iso: " + isoURL + " due to " + e.toString()); return false; } catch (final Exception e) { s_logger.debug("Unable to get config drive iso: " + isoURL + " due to " + e.toString()); return false; } VBD isoVBD = null; // Find the VM's CD-ROM VBD final Set<VBD> vbds = vm.getVBDs(conn); for (final VBD vbd : vbds) { final Types.VbdType type = vbd.getType(conn); final VBD.Record vbdr = vbd.getRecord(conn); // if the device exists then attach it if (!vbdr.userdevice.equals(_attachIsoDeviceNum) && type == Types.VbdType.CD) { isoVBD = vbd; break; } } if (isoVBD == null) { //create vbd final VBD.Record cfgDriveVbdr = new VBD.Record(); cfgDriveVbdr.VM = vm; cfgDriveVbdr.empty = true; cfgDriveVbdr.bootable = false; cfgDriveVbdr.userdevice = "autodetect"; cfgDriveVbdr.mode = Types.VbdMode.RO; cfgDriveVbdr.type = Types.VbdType.CD; final VBD cfgDriveVBD = VBD.create(conn, cfgDriveVbdr); isoVBD = cfgDriveVBD; s_logger.debug("Created CD-ROM VBD for VM: " + vm); } if (isoVBD != null) { // If an ISO is already inserted, eject it if (isoVBD.getEmpty(conn) == false) { isoVBD.eject(conn); } try { // Insert the new ISO isoVBD.insert(conn, srVdi); s_logger.debug("Attached config drive iso to vm " + vmName); } catch (final XmlRpcException ex) { s_logger.debug("Failed to attach config drive iso to vm " + vmName); return false; } } return true; } public SR createLocalIsoSR(final Connection conn, final String srName) throws XenAPIException, XmlRpcException { // if config drive sr already exists then return SR sr = getSRByNameLabelandHost(conn, _configDriveSRName + _host.getIp()); if (sr != null) { s_logger.debug("Config drive SR already exist, returing it"); return sr; } try { final Map<String, String> deviceConfig = new HashMap<String, String>(); 
final com.trilead.ssh2.Connection sshConnection = new com.trilead.ssh2.Connection(_host.getIp(), 22); try { sshConnection.connect(null, 60000, 60000); if (!sshConnection.authenticateWithPassword(_username, _password.peek())) { throw new CloudRuntimeException("Unable to authenticate"); } final String cmd = "mkdir -p " + _configDriveIsopath; if (!SSHCmdHelper.sshExecuteCmd(sshConnection, cmd)) { throw new CloudRuntimeException("Cannot create directory configdrive_iso on XenServer hosts"); } } catch (final IOException e) { throw new CloudRuntimeException("Unable to create iso folder", e); } finally { sshConnection.close(); } s_logger.debug("Created the config drive SR " + srName + " folder path " + _configDriveIsopath); deviceConfig.put("location", _configDriveIsopath); deviceConfig.put("legacy_mode", "true"); final Host host = Host.getByUuid(conn, _host.getUuid()); final String type = SRType.ISO.toString(); sr = SR.create(conn, host, deviceConfig, new Long(0), _configDriveIsopath, "iso", type, "iso", false, new HashMap<String, String>()); sr.setNameLabel(conn, srName); sr.setNameDescription(conn, deviceConfig.get("location")); sr.scan(conn); s_logger.debug("Config drive ISO SR at the path " + _configDriveIsopath + " got created in host " + _host); return sr; } catch (final XenAPIException e) { final String msg = "createLocalIsoSR failed! mountpoint " + e.toString(); s_logger.warn(msg, e); throw new CloudRuntimeException(msg, e); } catch (final Exception e) { final String msg = "createLocalIsoSR failed! mountpoint: due to " + e.getMessage(); s_logger.warn(msg, e); throw new CloudRuntimeException(msg, e); } } public void deleteLocalFolder(final String directory) throws Exception { if (directory == null || directory.isEmpty()) { final String msg = "Invalid directory path (null/empty) detected. Cannot delete specified directory."; s_logger.debug(msg); throw new Exception(msg); } try { FileUtils.deleteDirectory(new File(directory)); } catch (final IOException e) { // IOException here means failure to delete. Not swallowing it here to // let the caller handle with appropriate contextual log message. throw e; } } protected SR getSRByNameLabel(Connection conn, String name) throws BadServerResponse, XenAPIException, XmlRpcException { Set<SR> srs = SR.getByNameLabel(conn, name); SR ressr = null; for (SR sr : srs) { Set<PBD> pbds; pbds = sr.getPBDs(conn); for (PBD pbd : pbds) { PBD.Record pbdr = pbd.getRecord(conn); if (pbdr.host != null) { ressr = sr; break; } } } return ressr; } public boolean attachConfigDriveToMigratedVm(Connection conn, String vmName, String ipAddr) { // attach the config drive in destination host try { s_logger.debug("Attaching config drive iso device for the VM " + vmName + " In host " + ipAddr); Set<VM> vms = VM.getByNameLabel(conn, vmName); SR sr = getSRByNameLabel(conn, _configDriveSRName + ipAddr); //Here you will find only two vdis with the <vmname>.iso. 
//one is from source host and second from dest host Set<VDI> vdis = VDI.getByNameLabel(conn, vmName + ".iso"); if (vdis.isEmpty()) { s_logger.debug("Could not find config drive ISO: " + vmName); return false; } VDI configdriveVdi = null; for (VDI vdi : vdis) { SR vdiSr = vdi.getSR(conn); if (vdiSr.getUuid(conn).equals(sr.getUuid(conn))) { //get this vdi to attach to vbd configdriveVdi = vdi; s_logger.debug("VDI for the config drive ISO " + vdi); } else { // delete the vdi in source host so that the <vmname>.iso file is get removed s_logger.debug("Removing the source host VDI for the config drive ISO " + vdi); vdi.destroy(conn); } } if (configdriveVdi == null) { s_logger.debug("Config drive ISO VDI is not found "); return false; } for (VM vm : vms) { //create vbd VBD.Record cfgDriveVbdr = new VBD.Record(); cfgDriveVbdr.VM = vm; cfgDriveVbdr.empty = true; cfgDriveVbdr.bootable = false; cfgDriveVbdr.userdevice = "autodetect"; cfgDriveVbdr.mode = Types.VbdMode.RO; cfgDriveVbdr.type = Types.VbdType.CD; VBD cfgDriveVBD = VBD.create(conn, cfgDriveVbdr); s_logger.debug("Inserting vbd " + configdriveVdi); cfgDriveVBD.insert(conn, configdriveVdi); break; } return true; } catch (BadServerResponse e) { s_logger.warn("Failed to attach config drive ISO to the VM " + vmName + " In host " + ipAddr + " due to a bad server response.", e); return false; } catch (XenAPIException e) { s_logger.warn("Failed to attach config drive ISO to the VM " + vmName + " In host " + ipAddr + " due to a xapi problem.", e); return false; } catch (XmlRpcException e) { s_logger.warn("Failed to attach config drive ISO to the VM " + vmName + " In host " + ipAddr + " due to a problem in a remote call.", e); return false; } } }
[ "\"HOME\"" ]
[]
[ "HOME" ]
[]
["HOME"]
java
1
0
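The Java snippet above pushes patch files to a XenServer host and resolves each properties entry of the form key=src,permissions,dest, expanding a leading "~" in the source directory with the HOME environment variable. The following is a minimal Python sketch of that resolution rule only; the function name and the three-token layout are inferred from the Java code above and are illustrative, not part of this dataset.

import os

def resolve_patch_source(key, value, manifest_dir):
    """Illustrative mirror of the token handling in the Java snippet above."""
    tokens = value.split(",")
    if len(tokens) == 3 and tokens[0]:
        src = tokens[0]
        if src.startswith("/"):
            path = src                                   # absolute source path
        elif src.startswith("~"):
            home = os.environ.get("HOME", "")
            path = home + src[1:] + key                  # expand "~" via HOME
        else:
            path = os.path.join(manifest_dir, src, key)  # relative to manifest dir
    else:
        path = os.path.join(manifest_dir, key)
    if len(tokens) == 3:
        permissions = tokens[1]
    elif len(tokens) == 2:
        permissions = tokens[0]
    else:
        permissions = "0755"
    destination = tokens[-1]
    return path, permissions, destination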
nwg_panel/tools.py
#!/usr/bin/env python3 import os import sys import json import subprocess import stat import gi import nwg_panel.common gi.require_version('GdkPixbuf', '2.0') gi.require_version('Gtk', '3.0') gi.require_version('Gdk', '3.0') from gi.repository import Gtk, Gdk, GdkPixbuf from shutil import copyfile import nwg_panel.common try: import bluetooth except ModuleNotFoundError: pass try: import netifaces except ModuleNotFoundError: pass try: import psutil except: pass def eprint(*args, **kwargs): print(*args, file=sys.stderr, **kwargs) def temp_dir(): if os.getenv("TMPDIR"): return os.getenv("TMPDIR") elif os.getenv("TEMP"): return os.getenv("TEMP") elif os.getenv("TMP"): return os.getenv("TMP") return "/tmp" def get_app_dirs(): desktop_dirs = [] home = os.getenv("HOME") xdg_data_home = os.getenv("XDG_DATA_HOME") xdg_data_dirs = os.getenv("XDG_DATA_DIRS") if os.getenv("XDG_DATA_DIRS") else "/usr/local/share/:/usr/share/" if xdg_data_home: desktop_dirs.append(os.path.join(xdg_data_home, "applications")) else: if home: desktop_dirs.append(os.path.join(home, ".local/share/applications")) for d in xdg_data_dirs.split(":"): desktop_dirs.append(os.path.join(d, "applications")) # Add flatpak dirs if not found in XDG_DATA_DIRS flatpak_dirs = [os.path.join(home, ".local/share/flatpak/exports/share/applications"), "/var/lib/flatpak/exports/share/applications"] for d in flatpak_dirs: if d not in desktop_dirs: desktop_dirs.append(d) return desktop_dirs def map_odd_desktop_files(): name2icon_dict = {} for d in nwg_panel.common.app_dirs: if os.path.exists(d): for path in os.listdir(d): if os.path.isfile(os.path.join(d, path)): if path.endswith(".desktop") and path.count(".") > 1: try: content = load_text_file(os.path.join(d, path)) except Exception as e: eprint(e) if content: for line in content.splitlines(): if line.startswith("[") and not line == "[Desktop Entry]": break if line.upper().startswith("ICON="): icon = line.split("=")[1] name2icon_dict[path] = icon break return name2icon_dict def get_icon_name(app_name): if not app_name: return "" # GIMP returns "app_id": null and for some reason "class": "Gimp-2.10" instead of just "gimp". # Until the GTK3 version is released, let's make an exception for GIMP. if "GIMP" in app_name.upper(): return "gimp" for d in nwg_panel.common.app_dirs: # This will work if the .desktop file name is app_id.desktop or wm_class.desktop path = os.path.join(d, "{}.desktop".format(app_name)) content = None if os.path.isfile(path): content = load_text_file(path) elif os.path.isfile(path.lower()): content = load_text_file(path.lower()) if content: for line in content.splitlines(): if line.upper().startswith("ICON"): return line.split("=")[1] # Search the dictionary made of .desktop files that use "reverse DNS"-style names, prepared on startup. 
# see: https://github.com/nwg-piotr/nwg-panel/issues/64 for key in nwg_panel.common.name2icon_dict.keys(): if app_name in key.split("."): return nwg_panel.common.name2icon_dict[key] def local_dir(): local_dir = os.path.join(os.path.join(os.getenv("HOME"), ".local/share/nwg-panel")) if not os.path.isdir(local_dir): print("Creating '{}'".format(local_dir)) os.mkdir(local_dir) return local_dir def get_config_dir(): """ Determine config dir path, create if not found, then create sub-dirs :return: config dir path """ xdg_config_home = os.getenv('XDG_CONFIG_HOME') config_home = xdg_config_home if xdg_config_home else os.path.join(os.getenv("HOME"), ".config") config_dir = os.path.join(config_home, "nwg-panel") if not os.path.isdir(config_dir): print("Creating '{}'".format(config_dir)) os.mkdir(config_dir) # Icon folders to store user-defined icon replacements folder = os.path.join(config_dir, "icons_light") if not os.path.isdir(folder): print("Creating '{}'".format(folder)) os.mkdir(folder) folder = os.path.join(config_dir, "icons_dark") if not os.path.isdir(os.path.join(folder)): print("Creating '{}'".format(folder)) os.mkdir(folder) folder = os.path.join(config_dir, "executors") if not os.path.isdir(os.path.join(folder)): print("Creating '{}'".format(folder)) os.mkdir(folder) return config_dir def copy_files(src_dir, dst_dir, restore=False): src_files = os.listdir(src_dir) for file in src_files: if os.path.isfile(os.path.join(src_dir, file)): if not os.path.isfile(os.path.join(dst_dir, file)) or restore: copyfile(os.path.join(src_dir, file), os.path.join(dst_dir, file)) print("Copying '{}'".format(os.path.join(dst_dir, file))) def copy_executors(src_dir, dst_dir): src_files = os.listdir(src_dir) for file in src_files: if os.path.isfile(os.path.join(src_dir, file)) and not os.path.isfile(os.path.join(dst_dir, file)): copyfile(os.path.join(src_dir, file), os.path.join(dst_dir, file)) print("Copying '{}', marking executable".format(os.path.join(dst_dir, file))) st = os.stat(os.path.join(dst_dir, file)) os.chmod(os.path.join(dst_dir, file), st.st_mode | stat.S_IEXEC) def load_text_file(path): try: with open(path, 'r') as file: data = file.read() return data except Exception as e: print(e) return None def load_json(path): try: with open(path, 'r') as f: return json.load(f) except Exception as e: print(e) sys.exit(1) def save_json(src_dict, path): with open(path, 'w') as f: json.dump(src_dict, f, indent=2) def save_string(string, file): try: file = open(file, "wt") file.write(string) file.close() except: print("Error writing file '{}'".format(file)) def load_string(path): try: with open(path, 'r') as file: data = file.read() return data except: return "" def load_autotiling(): autotiling = [] path = os.path.join(temp_dir(), "autotiling") try: for ws in load_string(path).split(","): autotiling.append(int(ws)) except: pass return autotiling def list_outputs(sway=False, tree=None, silent=False): """ Get output names and geometry from i3 tree, assign to Gdk.Display monitors. 
:return: {"name": str, "x": int, "y": int, "width": int, "height": int, "monitor": Gkd.Monitor} """ outputs_dict = {} if sway: if not silent: print("Running on sway") if not tree: tree = nwg_panel.common.i3.get_tree() for item in tree: if item.type == "output" and not item.name.startswith("__"): outputs_dict[item.name] = {"x": item.rect.x, "y": item.rect.y, "width": item.rect.width, "height": item.rect.height} elif os.getenv('WAYLAND_DISPLAY') is not None: if not silent: print("Running on Wayland, but not sway") if nwg_panel.common.commands["wlr-randr"]: lines = subprocess.check_output("wlr-randr", shell=True).decode("utf-8").strip().splitlines() if lines: name, w, h, x, y = None, None, None, None, None for line in lines: if not line.startswith(" "): name = line.split()[0] elif "current" in line: w_h = line.split()[0].split('x') w = int(w_h[0]) h = int(w_h[1]) elif "Position" in line: x_y = line.split()[1].split(',') x = int(x_y[0]) y = int(x_y[1]) if name is not None and w is not None and h is not None and x is not None and y is not None: outputs_dict[name] = {'name': name, 'x': x, 'y': y, 'width': w, 'height': h} else: print("'wlr-randr' command not found, terminating") sys.exit(1) display = Gdk.Display.get_default() for i in range(display.get_n_monitors()): monitor = display.get_monitor(i) geometry = monitor.get_geometry() for key in outputs_dict: if int(outputs_dict[key]["x"]) == geometry.x and int(outputs_dict[key]["y"]) == geometry.y: outputs_dict[key]["monitor"] = monitor return outputs_dict def check_key(dictionary, key, default_value): """ Adds a key w/ default value if missing from the dictionary """ if key not in dictionary: dictionary[key] = default_value def cmd2string(cmd): try: return subprocess.check_output(cmd, shell=True).decode("utf-8").strip() except subprocess.CalledProcessError: return "" def is_command(cmd): cmd = cmd.split()[0] # strip arguments cmd = "command -v {}".format(cmd) try: is_cmd = subprocess.check_output(cmd, shell=True).decode("utf-8").strip() if is_cmd: return True except subprocess.CalledProcessError: return False def check_commands(): for key in nwg_panel.common.commands: nwg_panel.common.commands[key] = is_command(key) try: import netifaces nwg_panel.common.commands["netifaces"] = True except ModuleNotFoundError: pass try: import bluetooth nwg_panel.common.commands["pybluez"] = True except ModuleNotFoundError: pass def get_volume(): vol = 0 muted = False if nwg_panel.common.commands["pamixer"]: try: output = cmd2string("pamixer --get-volume") if output: vol = int(cmd2string("pamixer --get-volume")) except Exception as e: eprint(e) try: muted = subprocess.check_output("pamixer --get-mute", shell=True).decode( "utf-8").strip() == "true" except subprocess.CalledProcessError: # the command above returns the 'disabled` status w/ CalledProcessError, exit status 1 pass else: eprint("Couldn't get volume, 'pamixer' not found") return vol, muted def list_sinks(): sinks = [] if nwg_panel.common.commands["pamixer"]: try: output = cmd2string("pamixer --list-sinks") if output: lines = output.splitlines()[1:] for line in lines: details = line.split() name = details[1][1:-1] desc = " ".join(details[2:])[1:-1] sinks.append({"name": name, "desc": desc}) except Exception as e: eprint(e) else: eprint("Couldn't list sinks, 'pamixer' not found") return sinks def toggle_mute(*args): if nwg_panel.common.commands["pamixer"]: vol, muted = get_volume() if muted: subprocess.call("pamixer -u".split()) else: subprocess.call("pamixer -m".split()) else: eprint("Couldn't toggle 
mute, 'pamixer' not found") def set_volume(slider): percent = int(slider.get_value()) if nwg_panel.common.commands["pamixer"]: subprocess.call("pamixer --set-volume {}".format(percent).split()) else: eprint("Couldn't set volume, 'pamixer' not found") def get_brightness(): brightness = 0 if nwg_panel.common.commands["light"]: try: output = cmd2string("light -G") brightness = int(round(float(output), 0)) except: pass else: eprint("Couldn't get brightness, 'light' not found") return brightness def set_brightness(slider): value = int(slider.get_value()) if value == 0: value = 1 if nwg_panel.common.commands["light"]: subprocess.call("{} {}".format("light -S", value).split()) else: eprint("Required 'light' command not found") def get_battery(): try: b = psutil.sensors_battery() percent = int(round(b.percent, 0)) charging = b.power_plugged seconds = b.secsleft if seconds != psutil.POWER_TIME_UNLIMITED and seconds != psutil.POWER_TIME_UNKNOWN: time = seconds2string(seconds) else: time = "" return percent, time, charging except: return 0, "", False def seconds2string(seconds): minutes, sec = divmod(seconds, 60) hrs, minutes = divmod(minutes, 60) hrs = str(hrs) if len(hrs) < 2: hrs = "0{}".format(hrs) minutes = str(minutes) if len(minutes) < 2: minutes = "0{}".format(minutes) return "{}:{}".format(hrs, minutes) def get_interface(name): try: addrs = netifaces.ifaddresses(name) list = addrs[netifaces.AF_INET] return list[0]["addr"] except: return None def player_status(): status = "install playerctl" if nwg_panel.common.commands["playerctl"]: try: status = cmd2string("playerctl status 2>&1") except: pass return status def player_metadata(): data = "" try: data = cmd2string("playerctl metadata --format '{{artist}} - {{title}}'") except: pass return data def update_image(image, icon_name, icon_size, icons_path=""): # In case a full path was given if icon_name and icon_name.startswith("/"): try: pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size(icon_name, icon_size, icon_size) image.set_from_pixbuf(pixbuf) except: pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size( os.path.join(get_config_dir(), "icons_light/icon-missing.svg"), icon_size, icon_size) image.set_from_pixbuf(pixbuf) else: icon_theme = Gtk.IconTheme.get_default() if icons_path: path = "{}/{}.svg".format(icons_path, icon_name) try: pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size(path, icon_size, icon_size) if image: image.set_from_pixbuf(pixbuf) except: try: pixbuf = icon_theme.load_icon(icon_name, icon_size, Gtk.IconLookupFlags.FORCE_SIZE) if image: image.set_from_pixbuf(pixbuf) except: pass else: try: pixbuf = icon_theme.load_icon(icon_name, icon_size, Gtk.IconLookupFlags.FORCE_SIZE) except: try: pixbuf = icon_theme.load_icon(icon_name.lower(), icon_size, Gtk.IconLookupFlags.FORCE_SIZE) except: path = os.path.join(get_config_dir(), "icons_light/icon-missing.svg") pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size(path, icon_size, icon_size) if image: image.set_from_pixbuf(pixbuf) def create_pixbuf(icon_name, icon_size, icons_path=""): # In case a full path was given if icon_name.startswith("/"): try: pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size( icon_name, icon_size, icon_size) return pixbuf except: pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size( os.path.join(get_config_dir(), "icons_light/icon-missing.svg"), icon_size, icon_size) return pixbuf icon_theme = Gtk.IconTheme.get_default() if icons_path: path = "{}/{}.svg".format(icons_path, icon_name) try: pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size( path, icon_size, icon_size) return pixbuf 
except: try: pixbuf = icon_theme.load_icon(icon_name, icon_size, Gtk.IconLookupFlags.FORCE_SIZE) return pixbuf except: pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size( os.path.join(get_config_dir(), "icons_light/icon-missing.svg"), icon_size, icon_size) return pixbuf else: try: pixbuf = icon_theme.load_icon(icon_name, icon_size, Gtk.IconLookupFlags.FORCE_SIZE) return pixbuf except: pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size( os.path.join(get_config_dir(), "icons_light/icon-missing.svg"), icon_size, icon_size) return pixbuf def bt_adr(): try: adr = bluetooth.read_local_bdaddr() return adr[0] except: return "off" def list_configs(config_dir): configs = {} # allow to store json files other than panel config files in the config directory # (prevents from crash w/ nwg-drawer>=0.1.7 and future nwg-menu versions) exclusions = [os.path.join(config_dir, "preferred-apps.json")] entries = os.listdir(config_dir) entries.sort() for entry in entries: path = os.path.join(config_dir, entry) if os.path.isfile(path) and path not in exclusions and not path.endswith(".css"): try: with open(path, 'r') as f: config = json.load(f) configs[path] = config except: pass return configs
[]
[]
[ "XDG_DATA_DIRS", "XDG_DATA_HOME", "WAYLAND_DISPLAY", "TMP", "TMPDIR", "HOME", "XDG_CONFIG_HOME", "TEMP" ]
[]
["XDG_DATA_DIRS", "XDG_DATA_HOME", "WAYLAND_DISPLAY", "TMP", "TMPDIR", "HOME", "XDG_CONFIG_HOME", "TEMP"]
python
8
0
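The directory helpers in nwg_panel/tools.py above follow a common XDG fallback chain: prefer the XDG_* variable when set, otherwise fall back to a path under HOME. A minimal, self-contained sketch of that pattern; the helper name xdg_path and its defaults are illustrative and not part of nwg-panel.

import os

def xdg_path(var, home_fallback):
    """Return $var if set, otherwise $HOME/<home_fallback> (illustrative only)."""
    value = os.getenv(var)
    if value:
        return value
    return os.path.join(os.getenv("HOME", os.path.expanduser("~")), home_fallback)

# Mirrors the fallbacks used by get_config_dir() and get_app_dirs() above:
config_home = xdg_path("XDG_CONFIG_HOME", ".config")
data_home = xdg_path("XDG_DATA_HOME", ".local/share")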
django_mailbox_abstract/tests/base.py
import email import os.path import time from django.conf import settings from django.test import TestCase from django_mailbox_abstract import models, utils from django_mailbox_abstract.models import Mailbox, Message class EmailIntegrationTimeout(Exception): pass def get_email_as_text(name): with open( os.path.join( os.path.dirname(__file__), 'messages', name, ), 'rb' ) as f: return f.read() class EmailMessageTestCase(TestCase): ALLOWED_EXTRA_HEADERS = [ 'MIME-Version', 'Content-Transfer-Encoding', ] def setUp(self): dm_settings = utils.get_settings() self._ALLOWED_MIMETYPES = dm_settings['allowed_mimetypes'] self._STRIP_UNALLOWED_MIMETYPES = ( dm_settings['strip_unallowed_mimetypes'] ) self._TEXT_STORED_MIMETYPES = dm_settings['text_stored_mimetypes'] self.mailbox = Mailbox.objects.create(from_email='[email protected]') self.test_account = os.environ.get('EMAIL_ACCOUNT') self.test_password = os.environ.get('EMAIL_PASSWORD') self.test_smtp_server = os.environ.get('EMAIL_SMTP_SERVER') self.test_from_email = '[email protected]' self.maximum_wait_seconds = 60 * 5 settings.EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' settings.EMAIL_HOST = self.test_smtp_server settings.EMAIL_PORT = 587 settings.EMAIL_HOST_USER = self.test_account settings.EMAIL_HOST_PASSWORD = self.test_password settings.EMAIL_USE_TLS = True super().setUp() def _get_new_messages(self, mailbox, condition=None): start_time = time.time() # wait until there is at least one message while time.time() - start_time < self.maximum_wait_seconds: messages = self.mailbox.get_new_mail(condition) try: # check if generator contains at least one element message = next(messages) yield message yield from messages return except StopIteration: time.sleep(5) raise EmailIntegrationTimeout() def _get_email_as_text(self, name): with open( os.path.join( os.path.dirname(__file__), 'messages', name, ), 'rb' ) as f: return f.read() def _get_email_object(self, name): copy = self._get_email_as_text(name) return email.message_from_bytes(copy) def _headers_identical(self, left, right, header=None): """ Check if headers are (close enough to) identical. * This is particularly tricky because Python 2.6, Python 2.7 and Python 3 each handle header strings slightly differently. This should mash away all of the differences, though. * This also has a small loophole in that when re-writing e-mail payload encodings, we re-build the Content-Type header, so if the header was originally unquoted, it will be quoted when rehydrating the e-mail message. """ if header.lower() == 'content-type': # Special case; given that we re-write the header, we'll be quoting # the new content type; we need to make sure that doesn't cause # this comparison to fail. Also, the case of the encoding could # be changed, etc. etc. etc. 
left = left.replace('"', '').upper() right = right.replace('"', '').upper() left = left.replace('\n\t', ' ').replace('\n ', ' ') right = right.replace('\n\t', ' ').replace('\n ', ' ') if right != left: return False return True def compare_email_objects(self, left, right): # Compare headers for key, value in left.items(): if not right[key] and key in self.ALLOWED_EXTRA_HEADERS: continue if not right[key]: raise AssertionError("Extra header '%s'" % key) if not self._headers_identical(right[key], value, header=key): raise AssertionError( "Header '{}' unequal:\n{}\n{}".format( key, repr(value), repr(right[key]), ) ) for key, value in right.items(): if not left[key] and key in self.ALLOWED_EXTRA_HEADERS: continue if not left[key]: raise AssertionError("Extra header '%s'" % key) if not self._headers_identical(left[key], value, header=key): raise AssertionError( "Header '{}' unequal:\n{}\n{}".format( key, repr(value), repr(right[key]), ) ) if left.is_multipart() != right.is_multipart(): self._raise_mismatched(left, right) if left.is_multipart(): left_payloads = left.get_payload() right_payloads = right.get_payload() if len(left_payloads) != len(right_payloads): self._raise_mismatched(left, right) for n in range(len(left_payloads)): self.compare_email_objects( left_payloads[n], right_payloads[n] ) else: if left.get_payload() is None or right.get_payload() is None: if left.get_payload() is None: if right.get_payload is not None: self._raise_mismatched(left, right) if right.get_payload() is None: if left.get_payload is not None: self._raise_mismatched(left, right) elif left.get_payload().strip() != right.get_payload().strip(): self._raise_mismatched(left, right) def _raise_mismatched(self, left, right): raise AssertionError( "Message payloads do not match:\n{}\n{}".format( left.as_string(), right.as_string() ) ) def assertEqual(self, left, right): # noqa: N802 if not isinstance(left, email.message.Message): return super().assertEqual(left, right) return self.compare_email_objects(left, right) def tearDown(self): for message in Message.objects.all(): message.delete() models.ALLOWED_MIMETYPES = self._ALLOWED_MIMETYPES models.STRIP_UNALLOWED_MIMETYPES = self._STRIP_UNALLOWED_MIMETYPES models.TEXT_STORED_MIMETYPES = self._TEXT_STORED_MIMETYPES self.mailbox.delete() super().tearDown()
[]
[]
[ "EMAIL_ACCOUNT", "EMAIL_SMTP_SERVER", "EMAIL_PASSWORD" ]
[]
["EMAIL_ACCOUNT", "EMAIL_SMTP_SERVER", "EMAIL_PASSWORD"]
python
3
0
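The test base above pulls live credentials from EMAIL_ACCOUNT, EMAIL_PASSWORD and EMAIL_SMTP_SERVER. A common way to guard such integration tests when those variables are unset is a skip decorator; this is a hedged sketch, not part of django_mailbox_abstract itself.

import os
import unittest

REQUIRED_VARS = ("EMAIL_ACCOUNT", "EMAIL_PASSWORD", "EMAIL_SMTP_SERVER")

# Skip live SMTP/IMAP round-trip tests unless all credentials are provided.
requires_email_credentials = unittest.skipUnless(
    all(os.environ.get(v) for v in REQUIRED_VARS),
    "Set EMAIL_ACCOUNT, EMAIL_PASSWORD and EMAIL_SMTP_SERVER to run integration tests",
)

@requires_email_credentials
class LiveMailboxTest(unittest.TestCase):
    def test_placeholder(self):
        self.assertTrue(True)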
ingestion/src/metadata/cmd.py
# Copyright 2021 Collate # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import pathlib import sys from typing import List, Optional, Tuple import click from pydantic import ValidationError from metadata.__version__ import get_metadata_version from metadata.cli.backup import run_backup from metadata.cli.docker import run_docker from metadata.config.common import load_config_file from metadata.ingestion.api.workflow import Workflow from metadata.profiler.profiler_runner import ProfilerRunner logger = logging.getLogger(__name__) logging.getLogger("urllib3").setLevel(logging.WARN) # Configure logger. BASE_LOGGING_FORMAT = ( "[%(asctime)s] %(levelname)-8s {%(name)s:%(lineno)d} - %(message)s" ) logging.basicConfig(format=BASE_LOGGING_FORMAT) @click.group() def check() -> None: pass @click.group() @click.version_option(get_metadata_version()) @click.option( "--debug/--no-debug", default=lambda: os.environ.get("METADATA_DEBUG", False) ) def metadata(debug: bool) -> None: if debug: logging.getLogger().setLevel(logging.INFO) logging.getLogger("metadata").setLevel(logging.DEBUG) else: logging.getLogger().setLevel(logging.WARNING) logging.getLogger("metadata").setLevel(logging.INFO) @metadata.command() @click.option( "-c", "--config", type=click.Path(exists=True, dir_okay=False), help="Workflow config", required=True, ) def ingest(config: str) -> None: """Main command for ingesting metadata into Metadata""" config_file = pathlib.Path(config) workflow_config = load_config_file(config_file) try: logger.debug(f"Using config: {workflow_config}") workflow = Workflow.create(workflow_config) except ValidationError as e: click.echo(e, err=True) sys.exit(1) workflow.execute() workflow.stop() ret = workflow.print_status() sys.exit(ret) @metadata.command() @click.option( "-c", "--config", type=click.Path(exists=True, dir_okay=False), help="Workflow config", required=True, ) def report(config: str) -> None: """Report command to generate static pages with metadata""" config_file = pathlib.Path(config) workflow_config = load_config_file(config_file) file_sink = {"type": "file", "config": {"filename": "/tmp/datasets.json"}} try: logger.info(f"Using config: {workflow_config}") if workflow_config.get("sink"): del workflow_config["sink"] workflow_config["sink"] = file_sink ### add json generation as the sink workflow = Workflow.create(workflow_config) except ValidationError as e: click.echo(e, err=True) sys.exit(1) workflow.execute() workflow.stop() ret = workflow.print_status() os.environ.setdefault( "DJANGO_SETTINGS_MODULE", "metadata_server.openmetadata.settings" ) try: from django.core.management import call_command from django.core.wsgi import get_wsgi_application application = get_wsgi_application() call_command("runserver", "localhost:8000") except ImportError as exc: logger.error(exc) raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" 
) from exc @metadata.command() @click.option( "-c", "--config", type=click.Path(exists=True, dir_okay=False), help="Profiler config", required=True, ) def profiler(config: str) -> None: """Main command for running data openmetadata and tests""" try: config_file = pathlib.Path(config) profiler_config = load_config_file(config_file) try: logger.info(f"Using config: {profiler_config}") profiler_runner = ProfilerRunner.create(profiler_config) except ValidationError as e: click.echo(e, err=True) sys.exit(1) logger.info(f"Running Profiler for {profiler_runner.config.profiler.type} ...") profile_results = profiler_runner.run_profiler() logger.info(f"Profiler Results") logger.info(f"{profile_results}") except Exception as e: logger.exception(f"Scan failed: {str(e)}") logger.info(f"Exiting with code 1") @metadata.command() @click.option("--start", help="Start release docker containers", is_flag=True) @click.option( "--stop", help="Stops openmetadata docker containers", is_flag=True, ) @click.option("--pause", help="Pause openmetadata docker containers", is_flag=True) @click.option( "--resume", help="Resume/Unpause openmetadata docker containers", is_flag=True ) @click.option( "--clean", help="Stops and remove openmetadata docker containers along with images, volumes, networks associated", is_flag=True, ) @click.option( "-f", "--file-path", help="Path to Local docker-compose.yml", type=click.Path(exists=True, dir_okay=False), required=False, ) def docker(start, stop, pause, resume, clean, file_path) -> None: """ Checks Docker Memory Allocation Run Latest Release Docker - metadata docker --start Run Local Docker - metadata docker --start -f path/to/docker-compose.yml """ run_docker(start, stop, pause, resume, clean, file_path) @metadata.command() @click.option( "-h", "--host", help="Host that runs the database", required=True, ) @click.option( "-u", "--user", help="User to run the backup", required=True, ) @click.option( "-p", "--password", help="Credentials for the user", required=True, ) @click.option( "-d", "--database", help="Database to backup", required=True, ) @click.option( "--port", help="Database service port", default="3306", required=False, ) @click.option( "--output", help="Local path to store the backup", type=click.Path(exists=False, dir_okay=True), default=None, required=False, ) @click.option( "--upload", help="S3 endpoint, bucket & key to upload the backup file", nargs=3, type=click.Tuple([str, str, str]), default=None, required=False, ) @click.option( "-o", "--options", multiple=True, default=["--protocol=tcp", "--no-tablespaces"] ) def backup( host: str, user: str, password: str, database: str, port: str, output: Optional[str], upload: Optional[Tuple[str, str, str]], options: List[str], ) -> None: """ Run a backup for the metadata DB. Requires mysqldump installed on the host. We can pass as many options as required with `-o <opt1>, -o <opt2> [...]` To run the upload, provide the information as `--upload endpoint bucket key` and properly configure the environment variables AWS_ACCESS_KEY_ID & AWS_SECRET_ACCESS_KEY """ run_backup(host, user, password, database, port, output, upload, options) metadata.add_command(check)
[]
[]
[ "METADATA_DEBUG" ]
[]
["METADATA_DEBUG"]
python
1
0
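The --debug flag in cmd.py above takes its default lazily from METADATA_DEBUG, so the environment is read when the command is invoked rather than when the module is imported. A standalone sketch of that click pattern; the command name and the EXAMPLE_DEBUG variable are placeholders, not part of OpenMetadata.

import os
import click

@click.command()
@click.option(
    "--debug/--no-debug",
    # A callable default is evaluated at invocation time, so environment
    # changes made after import are still picked up.
    default=lambda: os.environ.get("EXAMPLE_DEBUG", "") not in ("", "0", "false"),
)
def cli(debug):
    click.echo(f"debug={debug}")

if __name__ == "__main__":
    cli()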
list_trello_boards.py
#!/usr/bin/env python3
import os
import sys

from trello import TrelloClient, ResourceUnavailable


def usage():
    print("""Usage: list_trello_boards show

Shows ids and names of Trello boards that the user specified with
TRELLO_API_KEY, TRELLO_API_SECRET, TRELLO_TOKEN and TRELLO_TOKEN_SECRET
environment variables is a member of.""")


def main():
    if len(sys.argv) < 2 or sys.argv[1].lower() != 'show':
        usage()
        return

    api_key = os.getenv('TRELLO_API_KEY')
    api_secret = os.getenv('TRELLO_API_SECRET')
    api_token = os.getenv('TRELLO_TOKEN')
    api_token_secret = os.getenv('TRELLO_TOKEN_SECRET')

    client = TrelloClient(
        api_key=api_key,
        api_secret=api_secret,
        token=api_token,
        token_secret=api_token_secret
    )

    try:
        boards = client.list_boards()
    except ResourceUnavailable as e:
        print('Fail:', e)
        return

    for b in boards:
        print(b.id, b.name)


if __name__ == '__main__':
    main()
[]
[]
[ "TRELLO_TOKEN", "TRELLO_API_KEY", "TRELLO_TOKEN_SECRET", "TRELLO_API_SECRET" ]
[]
["TRELLO_TOKEN", "TRELLO_API_KEY", "TRELLO_TOKEN_SECRET", "TRELLO_API_SECRET"]
python
4
0
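The script above assumes all four TRELLO_* variables are present and passes them straight to TrelloClient. A small pre-flight check along these lines (not part of the original script) makes missing credentials easier to diagnose.

import os
import sys

REQUIRED = ("TRELLO_API_KEY", "TRELLO_API_SECRET", "TRELLO_TOKEN", "TRELLO_TOKEN_SECRET")

def require_trello_env():
    """Exit with a readable message if any Trello credential variable is unset."""
    missing = [name for name in REQUIRED if not os.getenv(name)]
    if missing:
        print("Missing environment variables:", ", ".join(missing), file=sys.stderr)
        sys.exit(1)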
sunpy/net/fido_factory.py
""" This module provides the `Fido <sunpy.net.fido_factory.UnifiedDownloaderFactory>` instance of `sunpy.net.fido_factory.UnifiedDownloaderFactory` it also provides the `~sunpy.net.fido_factory.UnifiedResponse` class which `Fido.search <sunpy.net.fido_factory.UnifiedDownloaderFactory.search>` returns and the `~sunpy.net.fido_factory.DownloadResponse` class that is returned by `Fido.fetch <sunpy.net.fido_factory.UnifiedDownloaderFactory.fetch>`. """ # This module was initially developed under funding provided by Google Summer # of Code 2014 from __future__ import print_function, absolute_import from collections import Sequence from sunpy.util.datatype_factory_base import BasicRegistrationFactory from sunpy.util.datatype_factory_base import NoMatchError from sunpy.util.datatype_factory_base import MultipleMatchError from sunpy.net.dataretriever.clients import CLIENTS from sunpy.net.dataretriever.client import QueryResponse from sunpy.net.vso import VSOClient, QueryResponse as vsoQueryResponse from sunpy.net import attr from sunpy.net import attrs as a __all__ = ['Fido', 'UnifiedResponse', 'UnifiedDownloaderFactory', 'DownloadResponse'] class UnifiedResponse(Sequence): """ The object used to store results from `~sunpy.net.UnifiedDownloaderFactory.search`. The `~sunpy.net.Fido` object returns results from multiple different clients. So it is always possible to sub-select these results, you can index this object with two indices. The first index is the client index, i.e. corresponding to the results from the `~sunpy.net.vso.VSOClient`. The second index can be used to select records from the results returned from that client, for instance if you only want every second result you could index the second dimension with ``::2``. """ def __init__(self, lst): """ Parameters ---------- lst : `object` A single instance or an iterable of ``(QueryResponse, client)`` pairs or ``QueryResponse`` objects with a ``.client`` attribute. """ tmplst = [] # numfile is the number of files not the number of results. self._numfile = 0 if isinstance(lst, (QueryResponse, vsoQueryResponse)): if not hasattr(lst, 'client'): raise ValueError( ("A {} object is only a valid input to UnifiedResponse " "if it has a client attribute."). format(type(lst).__name__)) tmplst.append(lst) self._numfile = len(lst) else: for block in lst: if isinstance(block, tuple) and len(block) == 2: block[0].client = block[1] tmplst.append(block[0]) self._numfile += len(block[0]) elif hasattr(block, 'client'): tmplst.append(block) self._numfile += len(block) else: raise ValueError( "{} is not a valid input to UnifiedResponse.".format(type(lst))) self._list = tmplst def __len__(self): return len(self._list) def __iter__(self): return self.responses def _handle_record_slice(self, client_resp, record_slice): """ Given a slice to be applied to the results from a single client, return an object of the same type as client_resp. """ # When we subindex, we want to persist the type of the response object. resp_type = type(client_resp) # Make sure we always have an iterable, as most of the response objects # expect one. if isinstance(record_slice, int): resp = [client_resp[record_slice]] else: resp = client_resp[record_slice] # Reconstruct a response object with the sub-indexed records. ret = resp_type(resp) # Make sure we pass the client back out again. ret.client = client_resp.client return ret def __getitem__(self, aslice): """ Support slicing the UnifiedResponse as a 2D object. 
The first index is to the client and the second index is the records returned from those clients. """ # Just a single int as a slice, we are just indexing client. if isinstance(aslice, (int, slice)): ret = self._list[aslice] # Make sure we only have a length two slice. elif isinstance(aslice, tuple): if len(aslice) > 2: raise IndexError("UnifiedResponse objects can only " "be sliced with one or two indices.") # Indexing both client and records, but only for one client. if isinstance(aslice[0], int): client_resp = self._list[aslice[0]] ret = self._handle_record_slice(client_resp, aslice[1]) # Indexing both client and records for multiple clients. else: intermediate = self._list[aslice[0]] ret = [] for client_resp in intermediate: resp = self._handle_record_slice(client_resp, aslice[1]) ret.append(resp) else: raise IndexError("UnifiedResponse objects must be sliced with integers.") return UnifiedResponse(ret) def get_response(self, i): """ Get the actual response rather than another UnifiedResponse object. """ return self._list[i] def response_block_properties(self): """ Returns a set of class attributes on all the response blocks. Returns ------- s : list List of strings, containing attribute names in the response blocks. """ s = self.get_response(0).response_block_properties() for i in range(1, len(self)): s.intersection(self.get_response(i).response_block_properties()) return s @property def responses(self): """ A generator of all the `sunpy.net.dataretriever.client.QueryResponse` objects contained in the `~sunpy.net.fido_factory.UnifiedResponse` object. """ for i in range(len(self)): yield self.get_response(i) @property def file_num(self): return self._numfile def _repr_html_(self): nprov = len(self) if nprov == 1: ret = 'Results from {} Provider:</br></br>'.format(len(self)) else: ret = 'Results from {} Providers:</br></br>'.format(len(self)) for block in self.responses: ret += "{} Results from the {}:</br>".format(len(block), block.client.__class__.__name__) ret += block._repr_html_() ret += '</br>' return ret def __repr__(self): ret = super(UnifiedResponse, self).__repr__() ret += '\n' + str(self) return ret def __str__(self): nprov = len(self) if nprov == 1: ret = 'Results from {} Provider:\n\n'.format(len(self)) else: ret = 'Results from {} Providers:\n\n'.format(len(self)) for block in self.responses: ret += "{} Results from the {}:\n".format(len(block), block.client.__class__.__name__) lines = repr(block).split('\n') ret += '\n'.join(lines[1:]) ret += '\n\n' return ret class DownloadResponse(list): """ Object returned by clients servicing the query. """ def __init__(self, lst): super(DownloadResponse, self).__init__(lst) def wait(self, progress=True): """ Waits for all files to download completely and then return. Parameters ---------- progress : `bool` if true, display a progress bar. Returns ------- List of file paths to which files have been downloaded. """ filelist = [] for resobj in self: filelist.extend(resobj.wait(progress=progress)) return filelist """ Construct a simple AttrWalker to split up searches into blocks of attrs being 'anded' with AttrAnd. This pipeline only understands AttrAnd and AttrOr, Fido.search passes in an AttrAnd object of all the query parameters, if an AttrOr is encountered the query is split into the component parts of the OR, which at somepoint will end up being an AttrAnd object, at which point it is passed into _get_registered_widget. 
""" query_walker = attr.AttrWalker() @query_walker.add_creator(attr.AttrAnd) def _create_and(walker, query, factory): is_time = any([isinstance(x, a.Time) for x in query.attrs]) if not is_time: error = "The following part of the query did not have a time specified:\n" for at in query.attrs: error += str(at) + ', ' raise ValueError(error) # Return the response and the client return [factory._make_query_to_client(*query.attrs)] @query_walker.add_creator(attr.AttrOr) def _create_or(walker, query, factory): qblocks = [] for attrblock in query.attrs: qblocks.extend(walker.create(attr.and_(attrblock), factory)) return qblocks class UnifiedDownloaderFactory(BasicRegistrationFactory): """ sunpy.net.Fido(\*args, \*\*kwargs) Search and Download data from a variety of supported sources. """ def search(self, *query): """ Query for data in form of multiple parameters. Examples -------- Query for LYRALightCurve data for the time range ('2012/3/4','2012/3/6') >>> from sunpy.net import Fido, attrs as a >>> import astropy.units as u >>> unifresp = Fido.search(a.Time('2012/3/4', '2012/3/6'), a.Instrument('lyra')) # doctest: +REMOTE_DATA Query for data from Nobeyama Radioheliograph and RHESSI >>> unifresp = Fido.search(a.Time('2012/3/4', '2012/3/6'), ... (a.Instrument('norh') & a.Wavelength(17*u.GHz)) | a.Instrument('rhessi')) # doctest: +REMOTE_DATA Query for 304 Angstrom SDO AIA data with a cadence of 10 minutes >>> import astropy.units as u >>> from sunpy.net import Fido, attrs as a >>> unifresp = Fido.search(a.Time('2012/3/4', '2012/3/6'), ... a.Instrument('AIA'), ... a.Wavelength(304*u.angstrom, 304*u.angstrom), ... a.vso.Sample(10*u.minute)) # doctest: +REMOTE_DATA Parameters ---------- query : `sunpy.net.vso.attrs`, `sunpy.net.jsoc.attrs` A query consisting of multiple parameters which define the requested data. The query is specified using attributes from the VSO and the JSOC. The query can mix attributes from the VSO and the JSOC. Returns ------- `sunpy.net.fido_factory.UnifiedResponse` Container of responses returned by clients servicing query. Notes ----- The conjunction 'and' transforms query into disjunctive normal form ie. query is now of form A & B or ((A & B) | (C & D)) This helps in modularising query into parts and handling each of the parts individually. """ query = attr.and_(*query) return UnifiedResponse(query_walker.create(query, self)) # Python 3: this line should be like this # def fetch(self, *query_results, wait=True, progress=True, **kwargs): def fetch(self, *query_results, **kwargs): """ Download the records represented by `~sunpy.net.fido_factory.UnifiedResponse` objects. Parameters ---------- query_results : `sunpy.net.fido_factory.UnifiedResponse` Container returned by query method, or multiple. wait : `bool` fetch will wait until the download is complete before returning. progress : `bool` Show a progress bar while the download is running. 
Returns ------- `sunpy.net.fido_factory.DownloadResponse` Example -------- >>> from sunpy.net.vso.attrs import Time, Instrument >>> unifresp = Fido.search(Time('2012/3/4','2012/3/5'), Instrument('EIT')) # doctest: +REMOTE_DATA >>> downresp = Fido.fetch(unifresp) # doctest: +SKIP >>> file_paths = downresp.wait() # doctest: +SKIP """ wait = kwargs.pop("wait", True) progress = kwargs.pop("progress", True) reslist = [] for query_result in query_results: for block in query_result.responses: reslist.append(block.client.fetch(block, **kwargs)) results = DownloadResponse(reslist) if wait: return results.wait(progress=progress) else: return results def __call__(self, *args, **kwargs): raise TypeError("'{}' object is not callable".format(self.__class__.__name__)) def _check_registered_widgets(self, *args): """Factory helper function""" candidate_widget_types = list() for key in self.registry: if self.registry[key](*args): candidate_widget_types.append(key) n_matches = len(candidate_widget_types) if n_matches == 0: # There is no default client raise NoMatchError("This query was not understood by any clients. Did you miss an OR?") elif n_matches == 2: # If two clients have reported they understand this query, and one # of them is the VSOClient, then we ignore VSOClient. if VSOClient in candidate_widget_types: candidate_widget_types.remove(VSOClient) # Finally check that we only have one match. if len(candidate_widget_types) > 1: candidate_names = [cls.__name__ for cls in candidate_widget_types] raise MultipleMatchError("The following clients matched this query. " "Please make your query more specific.\n" "{}".format(candidate_names)) return candidate_widget_types def _make_query_to_client(self, *query): """ Given a query, look up the client and perform the query. Parameters ---------- query : collection of `~sunpy.net.vso.attr` objects Returns ------- response : `~sunpy.net.dataretriever.client.QueryResponse` client : `object` Instance of client class """ candidate_widget_types = self._check_registered_widgets(*query) tmpclient = candidate_widget_types[0]() return tmpclient.search(*query), tmpclient Fido = UnifiedDownloaderFactory( registry=CLIENTS, additional_validation_functions=['_can_handle_query'])
[]
[]
[]
[]
[]
python
null
null
null
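The Notes in the Fido.search docstring above explain that a combined query is first rewritten into disjunctive normal form and that each AND-block is then routed to exactly one client by query_walker's _create_and/_create_or creators. The short standalone Python sketch below illustrates that decomposition with toy And/Or classes; it is illustrative only and does not use sunpy's real attr.AttrWalker machinery.

# Toy illustration of the DNF walk performed by query_walker.
# The And/Or classes here are stand-ins, not sunpy.net.attr types.
class And:
    def __init__(self, *parts):
        self.parts = parts

class Or:
    def __init__(self, *blocks):
        self.blocks = blocks

def walk(query):
    # Mirrors _create_or (flatten OR into blocks) and _create_and (one block per client).
    if isinstance(query, Or):
        out = []
        for block in query.blocks:
            out.extend(walk(block))
        return out
    if isinstance(query, And):
        return [query.parts]
    return [(query,)]

# ((Time & Instrument(norh)) | Instrument(rhessi)) -> two blocks, each handled by one client
print(walk(Or(And("Time", "Instrument=norh"), "Instrument=rhessi")))
# [('Time', 'Instrument=norh'), ('Instrument=rhessi',)]

Each returned tuple corresponds to one _make_query_to_client call in the factory, which is also why _create_and insists that a Time attribute appears in every AND-block.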
cmd/postgres_exporter/postgres_exporter.go
package main import ( "crypto/sha256" "database/sql" "errors" "fmt" "io/ioutil" "math" "net/http" "net/url" "os" "regexp" "runtime" "strconv" "strings" "sync" "time" "github.com/blang/semver" "github.com/lib/pq" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/common/log" "github.com/prometheus/common/version" "gopkg.in/alecthomas/kingpin.v2" "gopkg.in/yaml.v2" ) // Branch is set during build to the git branch. var Branch string // BuildDate is set during build to the ISO-8601 date and time. var BuildDate string // Revision is set during build to the git commit revision. var Revision string // Version is set during build to the git describe version // (semantic version)-(commitish) form. var Version = "0.0.1-rev" // VersionShort is set during build to the semantic version. var VersionShort = "0.0.1" var ( listenAddress = kingpin.Flag("web.listen-address", "Address to listen on for web interface and telemetry.").Default(":9187").Envar("PG_EXPORTER_WEB_LISTEN_ADDRESS").String() metricPath = kingpin.Flag("web.telemetry-path", "Path under which to expose metrics.").Default("/metrics").Envar("PG_EXPORTER_WEB_TELEMETRY_PATH").String() disableDefaultMetrics = kingpin.Flag("disable-default-metrics", "Do not include default metrics.").Default("false").Envar("PG_EXPORTER_DISABLE_DEFAULT_METRICS").Bool() disableSettingsMetrics = kingpin.Flag("disable-settings-metrics", "Do not include pg_settings metrics.").Default("false").Envar("PG_EXPORTER_DISABLE_SETTINGS_METRICS").Bool() autoDiscoverDatabases = kingpin.Flag("auto-discover-databases", "Whether to discover the databases on a server dynamically.").Default("false").Envar("PG_EXPORTER_AUTO_DISCOVER_DATABASES").Bool() queriesPath = kingpin.Flag("extend.query-path", "Path to custom queries to run.").Default("").Envar("PG_EXPORTER_EXTEND_QUERY_PATH").String() onlyDumpMaps = kingpin.Flag("dumpmaps", "Do not run, simply dump the maps.").Bool() constantLabelsList = kingpin.Flag("constantLabels", "A list of label=value separated by comma(,).").Default("").Envar("PG_EXPORTER_CONSTANT_LABELS").String() excludeDatabases = kingpin.Flag("exclude-databases", "A list of databases to remove when autoDiscoverDatabases is enabled").Default("").Envar("PG_EXPORTER_EXCLUDE_DATABASES").String() ) // Metric name parts. const ( // Namespace for all metrics. namespace = "pg" // Subsystems. exporter = "exporter" // Metric label used for static string data thats handy to send to Prometheus // e.g. version staticLabelName = "static" // Metric label used for server identification. serverLabelName = "server" ) // ColumnUsage should be one of several enum values which describe how a // queried row is to be converted to a Prometheus metric. type ColumnUsage int // nolint: golint const ( DISCARD ColumnUsage = iota // Ignore this column LABEL ColumnUsage = iota // Use this column as a label COUNTER ColumnUsage = iota // Use this column as a counter GAUGE ColumnUsage = iota // Use this column as a gauge MAPPEDMETRIC ColumnUsage = iota // Use this column with the supplied mapping of text values DURATION ColumnUsage = iota // This column should be interpreted as a text duration (and converted to milliseconds) ) // UnmarshalYAML implements the yaml.Unmarshaller interface. 
func (cu *ColumnUsage) UnmarshalYAML(unmarshal func(interface{}) error) error { var value string if err := unmarshal(&value); err != nil { return err } columnUsage, err := stringToColumnUsage(value) if err != nil { return err } *cu = columnUsage return nil } // MappingOptions is a copy of ColumnMapping used only for parsing type MappingOptions struct { Usage string `yaml:"usage"` Description string `yaml:"description"` Mapping map[string]float64 `yaml:"metric_mapping"` // Optional column mapping for MAPPEDMETRIC SupportedVersions semver.Range `yaml:"pg_version"` // Semantic version ranges which are supported. Unsupported columns are not queried (internally converted to DISCARD). } // nolint: golint type Mapping map[string]MappingOptions // nolint: golint type UserQuery struct { Query string `yaml:"query"` Metrics []Mapping `yaml:"metrics"` Master bool `yaml:"master"` // Querying only for master database CacheSeconds uint64 `yaml:"cache_seconds"` // Number of seconds to cache the namespace result metrics for. } // nolint: golint type UserQueries map[string]UserQuery // Regex used to get the "short-version" from the postgres version field. var versionRegex = regexp.MustCompile(`^\w+ ((\d+)(\.\d+)?(\.\d+)?)`) var lowestSupportedVersion = semver.MustParse("9.1.0") // Parses the version of postgres into the short version string we can use to // match behaviors. func parseVersion(versionString string) (semver.Version, error) { submatches := versionRegex.FindStringSubmatch(versionString) if len(submatches) > 1 { return semver.ParseTolerant(submatches[1]) } return semver.Version{}, errors.New(fmt.Sprintln("Could not find a postgres version in string:", versionString)) } // ColumnMapping is the user-friendly representation of a prometheus descriptor map type ColumnMapping struct { usage ColumnUsage `yaml:"usage"` description string `yaml:"description"` mapping map[string]float64 `yaml:"metric_mapping"` // Optional column mapping for MAPPEDMETRIC supportedVersions semver.Range `yaml:"pg_version"` // Semantic version ranges which are supported. Unsupported columns are not queried (internally converted to DISCARD). } // UnmarshalYAML implements yaml.Unmarshaller func (cm *ColumnMapping) UnmarshalYAML(unmarshal func(interface{}) error) error { type plain ColumnMapping return unmarshal((*plain)(cm)) } // intermediateMetricMap holds the partially loaded metric map parsing. // This is mainly so we can parse cacheSeconds around. type intermediateMetricMap struct { columnMappings map[string]ColumnMapping master bool cacheSeconds uint64 } // MetricMapNamespace groups metric maps under a shared set of labels. type MetricMapNamespace struct { labels []string // Label names for this namespace columnMappings map[string]MetricMap // Column mappings in this namespace master bool // Call query only for master database cacheSeconds uint64 // Number of seconds this metric namespace can be cached. 0 disables. } // MetricMap stores the prometheus metric description which a given column will // be mapped to by the collector type MetricMap struct { discard bool // Should metric be discarded during mapping? 
vtype prometheus.ValueType // Prometheus valuetype desc *prometheus.Desc // Prometheus descriptor conversion func(interface{}) (float64, bool) // Conversion function to turn PG result into float64 } // ErrorConnectToServer is a connection to PgSQL server error type ErrorConnectToServer struct { Msg string } // Error returns error func (e *ErrorConnectToServer) Error() string { return e.Msg } // TODO: revisit this with the semver system func dumpMaps() { // TODO: make this function part of the exporter for name, cmap := range builtinMetricMaps { query, ok := queryOverrides[name] if !ok { fmt.Println(name) } else { for _, queryOverride := range query { fmt.Println(name, queryOverride.versionRange, queryOverride.query) } } for column, details := range cmap.columnMappings { fmt.Printf(" %-40s %v\n", column, details) } fmt.Println() } } var builtinMetricMaps = map[string]intermediateMetricMap{ "pg_stat_bgwriter": { map[string]ColumnMapping{ "checkpoints_timed": {COUNTER, "Number of scheduled checkpoints that have been performed", nil, nil}, "checkpoints_req": {COUNTER, "Number of requested checkpoints that have been performed", nil, nil}, "checkpoint_write_time": {COUNTER, "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds", nil, nil}, "checkpoint_sync_time": {COUNTER, "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds", nil, nil}, "buffers_checkpoint": {COUNTER, "Number of buffers written during checkpoints", nil, nil}, "buffers_clean": {COUNTER, "Number of buffers written by the background writer", nil, nil}, "maxwritten_clean": {COUNTER, "Number of times the background writer stopped a cleaning scan because it had written too many buffers", nil, nil}, "buffers_backend": {COUNTER, "Number of buffers written directly by a backend", nil, nil}, "buffers_backend_fsync": {COUNTER, "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)", nil, nil}, "buffers_alloc": {COUNTER, "Number of buffers allocated", nil, nil}, "stats_reset": {COUNTER, "Time at which these statistics were last reset", nil, nil}, }, true, 0, }, "pg_stat_database": { map[string]ColumnMapping{ "datid": {LABEL, "OID of a database", nil, nil}, "datname": {LABEL, "Name of this database", nil, nil}, "numbackends": {GAUGE, "Number of backends currently connected to this database. 
This is the only column in this view that returns a value reflecting current state; all other columns return the accumulated values since the last reset.", nil, nil}, "xact_commit": {COUNTER, "Number of transactions in this database that have been committed", nil, nil}, "xact_rollback": {COUNTER, "Number of transactions in this database that have been rolled back", nil, nil}, "blks_read": {COUNTER, "Number of disk blocks read in this database", nil, nil}, "blks_hit": {COUNTER, "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)", nil, nil}, "tup_returned": {COUNTER, "Number of rows returned by queries in this database", nil, nil}, "tup_fetched": {COUNTER, "Number of rows fetched by queries in this database", nil, nil}, "tup_inserted": {COUNTER, "Number of rows inserted by queries in this database", nil, nil}, "tup_updated": {COUNTER, "Number of rows updated by queries in this database", nil, nil}, "tup_deleted": {COUNTER, "Number of rows deleted by queries in this database", nil, nil}, "conflicts": {COUNTER, "Number of queries canceled due to conflicts with recovery in this database. (Conflicts occur only on standby servers; see pg_stat_database_conflicts for details.)", nil, nil}, "temp_files": {COUNTER, "Number of temporary files created by queries in this database. All temporary files are counted, regardless of why the temporary file was created (e.g., sorting or hashing), and regardless of the log_temp_files setting.", nil, nil}, "temp_bytes": {COUNTER, "Total amount of data written to temporary files by queries in this database. All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting.", nil, nil}, "deadlocks": {COUNTER, "Number of deadlocks detected in this database", nil, nil}, "blk_read_time": {COUNTER, "Time spent reading data file blocks by backends in this database, in milliseconds", nil, nil}, "blk_write_time": {COUNTER, "Time spent writing data file blocks by backends in this database, in milliseconds", nil, nil}, "stats_reset": {COUNTER, "Time at which these statistics were last reset", nil, nil}, }, true, 0, }, "pg_stat_database_conflicts": { map[string]ColumnMapping{ "datid": {LABEL, "OID of a database", nil, nil}, "datname": {LABEL, "Name of this database", nil, nil}, "confl_tablespace": {COUNTER, "Number of queries in this database that have been canceled due to dropped tablespaces", nil, nil}, "confl_lock": {COUNTER, "Number of queries in this database that have been canceled due to lock timeouts", nil, nil}, "confl_snapshot": {COUNTER, "Number of queries in this database that have been canceled due to old snapshots", nil, nil}, "confl_bufferpin": {COUNTER, "Number of queries in this database that have been canceled due to pinned buffers", nil, nil}, "confl_deadlock": {COUNTER, "Number of queries in this database that have been canceled due to deadlocks", nil, nil}, }, true, 0, }, "pg_locks": { map[string]ColumnMapping{ "datname": {LABEL, "Name of this database", nil, nil}, "mode": {LABEL, "Type of Lock", nil, nil}, "count": {GAUGE, "Number of locks", nil, nil}, }, true, 0, }, "pg_stat_replication": { map[string]ColumnMapping{ "procpid": {DISCARD, "Process ID of a WAL sender process", nil, semver.MustParseRange("<9.2.0")}, "pid": {DISCARD, "Process ID of a WAL sender process", nil, semver.MustParseRange(">=9.2.0")}, "usesysid": {DISCARD, "OID of the 
user logged into this WAL sender process", nil, nil}, "usename": {DISCARD, "Name of the user logged into this WAL sender process", nil, nil}, "application_name": {LABEL, "Name of the application that is connected to this WAL sender", nil, nil}, "client_addr": {LABEL, "IP address of the client connected to this WAL sender. If this field is null, it indicates that the client is connected via a Unix socket on the server machine.", nil, nil}, "client_hostname": {DISCARD, "Host name of the connected client, as reported by a reverse DNS lookup of client_addr. This field will only be non-null for IP connections, and only when log_hostname is enabled.", nil, nil}, "client_port": {DISCARD, "TCP port number that the client is using for communication with this WAL sender, or -1 if a Unix socket is used", nil, nil}, "backend_start": {DISCARD, "with time zone Time when this process was started, i.e., when the client connected to this WAL sender", nil, nil}, "backend_xmin": {DISCARD, "The current backend's xmin horizon.", nil, nil}, "state": {LABEL, "Current WAL sender state", nil, nil}, "sent_location": {DISCARD, "Last transaction log position sent on this connection", nil, semver.MustParseRange("<10.0.0")}, "write_location": {DISCARD, "Last transaction log position written to disk by this standby server", nil, semver.MustParseRange("<10.0.0")}, "flush_location": {DISCARD, "Last transaction log position flushed to disk by this standby server", nil, semver.MustParseRange("<10.0.0")}, "replay_location": {DISCARD, "Last transaction log position replayed into the database on this standby server", nil, semver.MustParseRange("<10.0.0")}, "sent_lsn": {DISCARD, "Last transaction log position sent on this connection", nil, semver.MustParseRange(">=10.0.0")}, "write_lsn": {DISCARD, "Last transaction log position written to disk by this standby server", nil, semver.MustParseRange(">=10.0.0")}, "flush_lsn": {DISCARD, "Last transaction log position flushed to disk by this standby server", nil, semver.MustParseRange(">=10.0.0")}, "replay_lsn": {DISCARD, "Last transaction log position replayed into the database on this standby server", nil, semver.MustParseRange(">=10.0.0")}, "sync_priority": {DISCARD, "Priority of this standby server for being chosen as the synchronous standby", nil, nil}, "sync_state": {DISCARD, "Synchronous state of this standby server", nil, nil}, "slot_name": {LABEL, "A unique, cluster-wide identifier for the replication slot", nil, semver.MustParseRange(">=9.2.0")}, "plugin": {DISCARD, "The base name of the shared object containing the output plugin this logical slot is using, or null for physical slots", nil, nil}, "slot_type": {DISCARD, "The slot type - physical or logical", nil, nil}, "datoid": {DISCARD, "The OID of the database this slot is associated with, or null. Only logical slots have an associated database", nil, nil}, "database": {DISCARD, "The name of the database this slot is associated with, or null. Only logical slots have an associated database", nil, nil}, "active": {DISCARD, "True if this slot is currently actively being used", nil, nil}, "active_pid": {DISCARD, "Process ID of a WAL sender process", nil, nil}, "xmin": {DISCARD, "The oldest transaction that this slot needs the database to retain. VACUUM cannot remove tuples deleted by any later transaction", nil, nil}, "catalog_xmin": {DISCARD, "The oldest transaction affecting the system catalogs that this slot needs the database to retain. 
VACUUM cannot remove catalog tuples deleted by any later transaction", nil, nil}, "restart_lsn": {DISCARD, "The address (LSN) of oldest WAL which still might be required by the consumer of this slot and thus won't be automatically removed during checkpoints", nil, nil}, "pg_current_xlog_location": {DISCARD, "pg_current_xlog_location", nil, nil}, "pg_current_wal_lsn": {DISCARD, "pg_current_xlog_location", nil, semver.MustParseRange(">=10.0.0")}, "pg_current_wal_lsn_bytes": {GAUGE, "WAL position in bytes", nil, semver.MustParseRange(">=10.0.0")}, "pg_xlog_location_diff": {GAUGE, "Lag in bytes between master and slave", nil, semver.MustParseRange(">=9.2.0 <10.0.0")}, "pg_wal_lsn_diff": {GAUGE, "Lag in bytes between master and slave", nil, semver.MustParseRange(">=10.0.0")}, "confirmed_flush_lsn": {DISCARD, "LSN position a consumer of a slot has confirmed flushing the data received", nil, nil}, "write_lag": {DISCARD, "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it (but not yet flushed it or applied it). This can be used to gauge the delay that synchronous_commit level remote_write incurred while committing if this server was configured as a synchronous standby.", nil, semver.MustParseRange(">=10.0.0")}, "flush_lag": {DISCARD, "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it (but not yet applied it). This can be used to gauge the delay that synchronous_commit level remote_flush incurred while committing if this server was configured as a synchronous standby.", nil, semver.MustParseRange(">=10.0.0")}, "replay_lag": {DISCARD, "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it. This can be used to gauge the delay that synchronous_commit level remote_apply incurred while committing if this server was configured as a synchronous standby.", nil, semver.MustParseRange(">=10.0.0")}, }, true, 0, }, "pg_stat_archiver": { map[string]ColumnMapping{ "archived_count": {COUNTER, "Number of WAL files that have been successfully archived", nil, nil}, "last_archived_wal": {DISCARD, "Name of the last WAL file successfully archived", nil, nil}, "last_archived_time": {DISCARD, "Time of the last successful archive operation", nil, nil}, "failed_count": {COUNTER, "Number of failed attempts for archiving WAL files", nil, nil}, "last_failed_wal": {DISCARD, "Name of the WAL file of the last failed archival operation", nil, nil}, "last_failed_time": {DISCARD, "Time of the last failed archival operation", nil, nil}, "stats_reset": {DISCARD, "Time at which these statistics were last reset", nil, nil}, "last_archive_age": {GAUGE, "Time in seconds since last WAL segment was successfully archived", nil, nil}, }, true, 0, }, "pg_stat_activity": { map[string]ColumnMapping{ "datname": {LABEL, "Name of this database", nil, nil}, "state": {LABEL, "connection state", nil, semver.MustParseRange(">=9.2.0")}, "count": {GAUGE, "number of connections in this state", nil, nil}, "max_tx_duration": {GAUGE, "max duration in seconds any active transaction has been running", nil, nil}, }, true, 0, }, } // OverrideQuery 's are run in-place of simple namespace look ups, and provide // advanced functionality. But they have a tendency to postgres version specific. // There aren't too many versions, so we simply store customized versions using // the semver matching we do for columns. 
type OverrideQuery struct { versionRange semver.Range query string } // Overriding queries for namespaces above. // TODO: validate this is a closed set in tests, and there are no overlaps var queryOverrides = map[string][]OverrideQuery{ "pg_locks": { { semver.MustParseRange(">0.0.0"), `SELECT pg_database.datname,tmp.mode,COALESCE(count,0) as count FROM ( VALUES ('accesssharelock'), ('rowsharelock'), ('rowexclusivelock'), ('shareupdateexclusivelock'), ('sharelock'), ('sharerowexclusivelock'), ('exclusivelock'), ('accessexclusivelock') ) AS tmp(mode) CROSS JOIN pg_database LEFT JOIN (SELECT database, lower(mode) AS mode,count(*) AS count FROM pg_locks WHERE database IS NOT NULL GROUP BY database, lower(mode) ) AS tmp2 ON tmp.mode=tmp2.mode and pg_database.oid = tmp2.database ORDER BY 1`, }, }, "pg_stat_replication": { { semver.MustParseRange(">=10.0.0"), ` SELECT *, (case pg_is_in_recovery() when 't' then null else pg_current_wal_lsn() end) AS pg_current_wal_lsn, (case pg_is_in_recovery() when 't' then null else pg_wal_lsn_diff(pg_current_wal_lsn(), pg_lsn('0/0'))::float end) AS pg_current_wal_lsn_bytes, (case pg_is_in_recovery() when 't' then null else pg_wal_lsn_diff(pg_current_wal_lsn(), replay_lsn)::float end) AS pg_wal_lsn_diff FROM pg_stat_replication `, }, { semver.MustParseRange(">=9.2.0 <10.0.0"), ` SELECT *, (case pg_is_in_recovery() when 't' then null else pg_current_xlog_location() end) AS pg_current_xlog_location, (case pg_is_in_recovery() when 't' then null else pg_xlog_location_diff(pg_current_xlog_location(), replay_location)::float end) AS pg_xlog_location_diff FROM pg_stat_replication `, }, { semver.MustParseRange("<9.2.0"), ` SELECT *, (case pg_is_in_recovery() when 't' then null else pg_current_xlog_location() end) AS pg_current_xlog_location FROM pg_stat_replication `, }, }, "pg_stat_archiver": { { semver.MustParseRange(">=0.0.0"), ` SELECT *, extract(epoch from now() - last_archived_time) AS last_archive_age FROM pg_stat_archiver `, }, }, "pg_stat_activity": { // This query only works { semver.MustParseRange(">=9.2.0"), ` SELECT pg_database.datname, tmp.state, COALESCE(count,0) as count, COALESCE(max_tx_duration,0) as max_tx_duration FROM ( VALUES ('active'), ('idle'), ('idle in transaction'), ('idle in transaction (aborted)'), ('fastpath function call'), ('disabled') ) AS tmp(state) CROSS JOIN pg_database LEFT JOIN ( SELECT datname, state, count(*) AS count, MAX(EXTRACT(EPOCH FROM now() - xact_start))::float AS max_tx_duration FROM pg_stat_activity GROUP BY datname,state) AS tmp2 ON tmp.state = tmp2.state AND pg_database.datname = tmp2.datname `, }, { semver.MustParseRange("<9.2.0"), ` SELECT datname, 'unknown' AS state, COALESCE(count(*),0) AS count, COALESCE(MAX(EXTRACT(EPOCH FROM now() - xact_start))::float,0) AS max_tx_duration FROM pg_stat_activity GROUP BY datname `, }, }, } // Convert the query override file to the version-specific query override file // for the exporter. func makeQueryOverrideMap(pgVersion semver.Version, queryOverrides map[string][]OverrideQuery) map[string]string { resultMap := make(map[string]string) for name, overrideDef := range queryOverrides { // Find a matching semver. We make it an error to have overlapping // ranges at test-time, so only 1 should ever match. 
matched := false for _, queryDef := range overrideDef { if queryDef.versionRange(pgVersion) { resultMap[name] = queryDef.query matched = true break } } if !matched { log.Warnln("No query matched override for", name, "- disabling metric space.") resultMap[name] = "" } } return resultMap } func parseUserQueries(content []byte) (map[string]intermediateMetricMap, map[string]string, error) { var userQueries UserQueries err := yaml.Unmarshal(content, &userQueries) if err != nil { return nil, nil, err } // Stores the loaded map representation metricMaps := make(map[string]intermediateMetricMap) newQueryOverrides := make(map[string]string) for metric, specs := range userQueries { log.Debugln("New user metric namespace from YAML:", metric, "Will cache results for:", specs.CacheSeconds) newQueryOverrides[metric] = specs.Query metricMap, ok := metricMaps[metric] if !ok { // Namespace for metric not found - add it. newMetricMap := make(map[string]ColumnMapping) metricMap = intermediateMetricMap{ columnMappings: newMetricMap, master: specs.Master, cacheSeconds: specs.CacheSeconds, } metricMaps[metric] = metricMap } for _, metric := range specs.Metrics { for name, mappingOption := range metric { var columnMapping ColumnMapping tmpUsage, _ := stringToColumnUsage(mappingOption.Usage) columnMapping.usage = tmpUsage columnMapping.description = mappingOption.Description // TODO: we should support cu columnMapping.mapping = nil // Should we support this for users? columnMapping.supportedVersions = nil metricMap.columnMappings[name] = columnMapping } } } return metricMaps, newQueryOverrides, nil } // Add queries to the builtinMetricMaps and queryOverrides maps. Added queries do not // respect version requirements, because it is assumed that the user knows // what they are doing with their version of postgres. // // This function modifies metricMap and queryOverrideMap to contain the new // queries. // TODO: test code for all cu. // TODO: the YAML this supports is "non-standard" - we should move away from it. func addQueries(content []byte, pgVersion semver.Version, server *Server) error { metricMaps, newQueryOverrides, err := parseUserQueries(content) if err != nil { return err } // Convert the loaded metric map into exporter representation partialExporterMap := makeDescMap(pgVersion, server.labels, metricMaps) // Merge the two maps (which are now quite flatteend) for k, v := range partialExporterMap { _, found := server.metricMap[k] if found { log.Debugln("Overriding metric", k, "from user YAML file.") } else { log.Debugln("Adding new metric", k, "from user YAML file.") } server.metricMap[k] = v } // Merge the query override map for k, v := range newQueryOverrides { _, found := server.queryOverrides[k] if found { log.Debugln("Overriding query override", k, "from user YAML file.") } else { log.Debugln("Adding new query override", k, "from user YAML file.") } server.queryOverrides[k] = v } return nil } // Turn the MetricMap column mapping into a prometheus descriptor mapping. 
func makeDescMap(pgVersion semver.Version, serverLabels prometheus.Labels, metricMaps map[string]intermediateMetricMap) map[string]MetricMapNamespace { var metricMap = make(map[string]MetricMapNamespace) for namespace, intermediateMappings := range metricMaps { thisMap := make(map[string]MetricMap) // Get the constant labels var variableLabels []string for columnName, columnMapping := range intermediateMappings.columnMappings { if columnMapping.usage == LABEL { variableLabels = append(variableLabels, columnName) } } for columnName, columnMapping := range intermediateMappings.columnMappings { // Check column version compatibility for the current map // Force to discard if not compatible. if columnMapping.supportedVersions != nil { if !columnMapping.supportedVersions(pgVersion) { // It's very useful to be able to see what columns are being // rejected. log.Debugln(columnName, "is being forced to discard due to version incompatibility.") thisMap[columnName] = MetricMap{ discard: true, conversion: func(_ interface{}) (float64, bool) { return math.NaN(), true }, } continue } } // Determine how to convert the column based on its usage. // nolint: dupl switch columnMapping.usage { case DISCARD, LABEL: thisMap[columnName] = MetricMap{ discard: true, conversion: func(_ interface{}) (float64, bool) { return math.NaN(), true }, } case COUNTER: thisMap[columnName] = MetricMap{ vtype: prometheus.CounterValue, desc: prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), columnMapping.description, variableLabels, serverLabels), conversion: func(in interface{}) (float64, bool) { return dbToFloat64(in) }, } case GAUGE: thisMap[columnName] = MetricMap{ vtype: prometheus.GaugeValue, desc: prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), columnMapping.description, variableLabels, serverLabels), conversion: func(in interface{}) (float64, bool) { return dbToFloat64(in) }, } case MAPPEDMETRIC: thisMap[columnName] = MetricMap{ vtype: prometheus.GaugeValue, desc: prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), columnMapping.description, variableLabels, serverLabels), conversion: func(in interface{}) (float64, bool) { text, ok := in.(string) if !ok { return math.NaN(), false } val, ok := columnMapping.mapping[text] if !ok { return math.NaN(), false } return val, true }, } case DURATION: thisMap[columnName] = MetricMap{ vtype: prometheus.GaugeValue, desc: prometheus.NewDesc(fmt.Sprintf("%s_%s_milliseconds", namespace, columnName), columnMapping.description, variableLabels, serverLabels), conversion: func(in interface{}) (float64, bool) { var durationString string switch t := in.(type) { case []byte: durationString = string(t) case string: durationString = t default: log.Errorln("DURATION conversion metric was not a string") return math.NaN(), false } if durationString == "-1" { return math.NaN(), false } d, err := time.ParseDuration(durationString) if err != nil { log.Errorln("Failed converting result to metric:", columnName, in, err) return math.NaN(), false } return float64(d / time.Millisecond), true }, } } } metricMap[namespace] = MetricMapNamespace{variableLabels, thisMap, intermediateMappings.master, intermediateMappings.cacheSeconds} } return metricMap } // convert a string to the corresponding ColumnUsage func stringToColumnUsage(s string) (ColumnUsage, error) { var u ColumnUsage var err error switch s { case "DISCARD": u = DISCARD case "LABEL": u = LABEL case "COUNTER": u = COUNTER case "GAUGE": u = GAUGE case "MAPPEDMETRIC": u = MAPPEDMETRIC case "DURATION": u = 
DURATION default: err = fmt.Errorf("wrong ColumnUsage given : %s", s) } return u, err } // Convert database.sql types to float64s for Prometheus consumption. Null types are mapped to NaN. string and []byte // types are mapped as NaN and !ok func dbToFloat64(t interface{}) (float64, bool) { switch v := t.(type) { case int64: return float64(v), true case float64: return v, true case time.Time: return float64(v.Unix()), true case []byte: // Try and convert to string and then parse to a float64 strV := string(v) result, err := strconv.ParseFloat(strV, 64) if err != nil { log.Infoln("Could not parse []byte:", err) return math.NaN(), false } return result, true case string: result, err := strconv.ParseFloat(v, 64) if err != nil { log.Infoln("Could not parse string:", err) return math.NaN(), false } return result, true case bool: if v { return 1.0, true } return 0.0, true case nil: return math.NaN(), true default: return math.NaN(), false } } // Convert database.sql to string for Prometheus labels. Null types are mapped to empty strings. func dbToString(t interface{}) (string, bool) { switch v := t.(type) { case int64: return fmt.Sprintf("%v", v), true case float64: return fmt.Sprintf("%v", v), true case time.Time: return fmt.Sprintf("%v", v.Unix()), true case nil: return "", true case []byte: // Try and convert to string return string(v), true case string: return v, true case bool: if v { return "true", true } return "false", true default: return "", false } } func parseFingerprint(url string) (string, error) { dsn, err := pq.ParseURL(url) if err != nil { dsn = url } pairs := strings.Split(dsn, " ") kv := make(map[string]string, len(pairs)) for _, pair := range pairs { splitted := strings.SplitN(pair, "=", 2) if len(splitted) != 2 { return "", fmt.Errorf("malformed dsn %q", dsn) } kv[splitted[0]] = splitted[1] } var fingerprint string if host, ok := kv["host"]; ok { fingerprint += host } else { fingerprint += "localhost" } if port, ok := kv["port"]; ok { fingerprint += ":" + port } else { fingerprint += ":5432" } return fingerprint, nil } func loggableDSN(dsn string) string { pDSN, err := url.Parse(dsn) if err != nil { return "could not parse DATA_SOURCE_NAME" } // Blank user info if not nil if pDSN.User != nil { pDSN.User = url.UserPassword(pDSN.User.Username(), "PASSWORD_REMOVED") } return pDSN.String() } type cachedMetrics struct { metrics []prometheus.Metric lastScrape time.Time } // Server describes a connection to Postgres. // Also it contains metrics map and query overrides. type Server struct { db *sql.DB labels prometheus.Labels master bool // Last version used to calculate metric map. If mismatch on scrape, // then maps are recalculated. lastMapVersion semver.Version // Currently active metric map metricMap map[string]MetricMapNamespace // Currently active query overrides queryOverrides map[string]string mappingMtx sync.RWMutex // Currently cached metrics metricCache map[string]cachedMetrics cacheMtx sync.Mutex } // ServerOpt configures a server. type ServerOpt func(*Server) // ServerWithLabels configures a set of labels. func ServerWithLabels(labels prometheus.Labels) ServerOpt { return func(s *Server) { for k, v := range labels { s.labels[k] = v } } } // NewServer establishes a new connection using DSN. 
func NewServer(dsn string, opts ...ServerOpt) (*Server, error) { fingerprint, err := parseFingerprint(dsn) if err != nil { return nil, err } db, err := sql.Open("postgres", dsn) if err != nil { return nil, err } db.SetMaxOpenConns(1) db.SetMaxIdleConns(1) log.Infof("Established new database connection to %q.", fingerprint) s := &Server{ db: db, master: false, labels: prometheus.Labels{ serverLabelName: fingerprint, }, metricCache: make(map[string]cachedMetrics), } for _, opt := range opts { opt(s) } return s, nil } // Close disconnects from Postgres. func (s *Server) Close() error { return s.db.Close() } // Ping checks connection availability and possibly invalidates the connection if it fails. func (s *Server) Ping() error { if err := s.db.Ping(); err != nil { if cerr := s.Close(); cerr != nil { log.Errorf("Error while closing non-pinging DB connection to %q: %v", s, cerr) } return err } return nil } // String returns server's fingerprint. func (s *Server) String() string { return s.labels[serverLabelName] } // Scrape loads metrics. func (s *Server) Scrape(ch chan<- prometheus.Metric, disableSettingsMetrics bool) error { s.mappingMtx.RLock() defer s.mappingMtx.RUnlock() var err error if !disableSettingsMetrics && s.master { if err = querySettings(ch, s); err != nil { err = fmt.Errorf("error retrieving settings: %s", err) } } errMap := queryNamespaceMappings(ch, s) if len(errMap) > 0 { err = fmt.Errorf("queryNamespaceMappings returned %d errors", len(errMap)) } return err } // Servers contains a collection of servers to Postgres. type Servers struct { m sync.Mutex servers map[string]*Server opts []ServerOpt } // NewServers creates a collection of servers to Postgres. func NewServers(opts ...ServerOpt) *Servers { return &Servers{ servers: make(map[string]*Server), opts: opts, } } // GetServer returns established connection from a collection. func (s *Servers) GetServer(dsn string) (*Server, error) { s.m.Lock() defer s.m.Unlock() var err error var ok bool errCount := 0 // start at zero because we increment before doing work retries := 3 var server *Server for { if errCount++; errCount > retries { return nil, err } server, ok = s.servers[dsn] if !ok { server, err = NewServer(dsn, s.opts...) if err != nil { time.Sleep(time.Duration(errCount) * time.Second) continue } s.servers[dsn] = server } if err = server.Ping(); err != nil { delete(s.servers, dsn) time.Sleep(time.Duration(errCount) * time.Second) continue } break } return server, nil } // Close disconnects from all known servers. func (s *Servers) Close() { s.m.Lock() defer s.m.Unlock() for _, server := range s.servers { if err := server.Close(); err != nil { log.Errorf("failed to close connection to %q: %v", server, err) } } } // Exporter collects Postgres metrics. It implements prometheus.Collector. type Exporter struct { // Holds a reference to the build in column mappings. Currently this is for testing purposes // only, since it just points to the global. builtinMetricMaps map[string]intermediateMetricMap disableDefaultMetrics, disableSettingsMetrics, autoDiscoverDatabases bool excludeDatabases []string dsn []string userQueriesPath string constantLabels prometheus.Labels duration prometheus.Gauge error prometheus.Gauge psqlUp prometheus.Gauge userQueriesError *prometheus.GaugeVec totalScrapes prometheus.Counter // servers are used to allow re-using the DB connection between scrapes. // servers contains metrics map and query overrides. servers *Servers } // ExporterOpt configures Exporter. 
type ExporterOpt func(*Exporter) // DisableDefaultMetrics configures default metrics export. func DisableDefaultMetrics(b bool) ExporterOpt { return func(e *Exporter) { e.disableDefaultMetrics = b } } // DisableSettingsMetrics configures pg_settings export. func DisableSettingsMetrics(b bool) ExporterOpt { return func(e *Exporter) { e.disableSettingsMetrics = b } } // AutoDiscoverDatabases allows scraping all databases on a database server. func AutoDiscoverDatabases(b bool) ExporterOpt { return func(e *Exporter) { e.autoDiscoverDatabases = b } } // ExcludeDatabases allows to filter out result from AutoDiscoverDatabases func ExcludeDatabases(s string) ExporterOpt { return func(e *Exporter) { e.excludeDatabases = strings.Split(s, ",") } } // WithUserQueriesPath configures user's queries path. func WithUserQueriesPath(p string) ExporterOpt { return func(e *Exporter) { e.userQueriesPath = p } } // WithConstantLabels configures constant labels. func WithConstantLabels(s string) ExporterOpt { return func(e *Exporter) { e.constantLabels = parseConstLabels(s) } } func parseConstLabels(s string) prometheus.Labels { labels := make(prometheus.Labels) s = strings.TrimSpace(s) if len(s) == 0 { return labels } parts := strings.Split(s, ",") for _, p := range parts { keyValue := strings.Split(strings.TrimSpace(p), "=") if len(keyValue) != 2 { log.Errorf(`Wrong constant labels format %q, should be "key=value"`, p) continue } key := strings.TrimSpace(keyValue[0]) value := strings.TrimSpace(keyValue[1]) if key == "" || value == "" { continue } labels[key] = value } return labels } // NewExporter returns a new PostgreSQL exporter for the provided DSN. func NewExporter(dsn []string, opts ...ExporterOpt) *Exporter { e := &Exporter{ dsn: dsn, builtinMetricMaps: builtinMetricMaps, } for _, opt := range opts { opt(e) } e.setupInternalMetrics() e.setupServers() return e } func (e *Exporter) setupServers() { e.servers = NewServers(ServerWithLabels(e.constantLabels)) } func (e *Exporter) setupInternalMetrics() { e.duration = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Subsystem: exporter, Name: "last_scrape_duration_seconds", Help: "Duration of the last scrape of metrics from PostgresSQL.", ConstLabels: e.constantLabels, }) e.totalScrapes = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Subsystem: exporter, Name: "scrapes_total", Help: "Total number of times PostgresSQL was scraped for metrics.", ConstLabels: e.constantLabels, }) e.error = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Subsystem: exporter, Name: "last_scrape_error", Help: "Whether the last scrape of metrics from PostgreSQL resulted in an error (1 for error, 0 for success).", ConstLabels: e.constantLabels, }) e.psqlUp = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Name: "up", Help: "Whether the last scrape of metrics from PostgreSQL was able to connect to the server (1 for yes, 0 for no).", ConstLabels: e.constantLabels, }) e.userQueriesError = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: namespace, Subsystem: exporter, Name: "user_queries_load_error", Help: "Whether the user queries file was loaded and parsed successfully (1 for error, 0 for success).", ConstLabels: e.constantLabels, }, []string{"filename", "hashsum"}) } // Describe implements prometheus.Collector. func (e *Exporter) Describe(ch chan<- *prometheus.Desc) { } // Collect implements prometheus.Collector. 
func (e *Exporter) Collect(ch chan<- prometheus.Metric) { e.scrape(ch) ch <- e.duration ch <- e.totalScrapes ch <- e.error ch <- e.psqlUp e.userQueriesError.Collect(ch) } func newDesc(subsystem, name, help string, labels prometheus.Labels) *prometheus.Desc { return prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, name), help, nil, labels, ) } func queryDatabases(server *Server) ([]string, error) { rows, err := server.db.Query("SELECT datname FROM pg_database WHERE datallowconn = true AND datistemplate = false AND datname != current_database()") // nolint: safesql if err != nil { return nil, fmt.Errorf("Error retrieving databases: %v", err) } defer rows.Close() // nolint: errcheck var databaseName string result := make([]string, 0) for rows.Next() { err = rows.Scan(&databaseName) if err != nil { return nil, errors.New(fmt.Sprintln("Error retrieving rows:", err)) } result = append(result, databaseName) } return result, nil } // Query within a namespace mapping and emit metrics. Returns fatal errors if // the scrape fails, and a slice of errors if they were non-fatal. func queryNamespaceMapping(server *Server, namespace string, mapping MetricMapNamespace) ([]prometheus.Metric, []error, error) { // Check for a query override for this namespace query, found := server.queryOverrides[namespace] // Was this query disabled (i.e. nothing sensible can be queried on cu // version of PostgreSQL? if query == "" && found { // Return success (no pertinent data) return []prometheus.Metric{}, []error{}, nil } // Don't fail on a bad scrape of one metric var rows *sql.Rows var err error if !found { // I've no idea how to avoid this properly at the moment, but this is // an admin tool so you're not injecting SQL right? rows, err = server.db.Query(fmt.Sprintf("SELECT * FROM %s;", namespace)) // nolint: gas, safesql } else { rows, err = server.db.Query(query) // nolint: safesql } if err != nil { return []prometheus.Metric{}, []error{}, fmt.Errorf("Error running query on database %q: %s %v", server, namespace, err) } defer rows.Close() // nolint: errcheck var columnNames []string columnNames, err = rows.Columns() if err != nil { return []prometheus.Metric{}, []error{}, errors.New(fmt.Sprintln("Error retrieving column list for: ", namespace, err)) } // Make a lookup map for the column indices var columnIdx = make(map[string]int, len(columnNames)) for i, n := range columnNames { columnIdx[n] = i } var columnData = make([]interface{}, len(columnNames)) var scanArgs = make([]interface{}, len(columnNames)) for i := range columnData { scanArgs[i] = &columnData[i] } nonfatalErrors := []error{} metrics := make([]prometheus.Metric, 0) for rows.Next() { err = rows.Scan(scanArgs...) if err != nil { return []prometheus.Metric{}, []error{}, errors.New(fmt.Sprintln("Error retrieving rows:", namespace, err)) } // Get the label values for this row. labels := make([]string, len(mapping.labels)) for idx, label := range mapping.labels { labels[idx], _ = dbToString(columnData[columnIdx[label]]) } // Loop over column names, and match to scan data. Unknown columns // will be filled with an untyped metric number *if* they can be // converted to float64s. NULLs are allowed and treated as NaN. for idx, columnName := range columnNames { var metric prometheus.Metric if metricMapping, ok := mapping.columnMappings[columnName]; ok { // Is this a metricy metric? 
if metricMapping.discard { continue } value, ok := dbToFloat64(columnData[idx]) if !ok { nonfatalErrors = append(nonfatalErrors, errors.New(fmt.Sprintln("Unexpected error parsing column: ", namespace, columnName, columnData[idx]))) continue } // Generate the metric metric = prometheus.MustNewConstMetric(metricMapping.desc, metricMapping.vtype, value, labels...) } else { // Unknown metric. Report as untyped if scan to float64 works, else note an error too. metricLabel := fmt.Sprintf("%s_%s", namespace, columnName) desc := prometheus.NewDesc(metricLabel, fmt.Sprintf("Unknown metric from %s", namespace), mapping.labels, server.labels) // Its not an error to fail here, since the values are // unexpected anyway. value, ok := dbToFloat64(columnData[idx]) if !ok { nonfatalErrors = append(nonfatalErrors, errors.New(fmt.Sprintln("Unparseable column type - discarding: ", namespace, columnName, err))) continue } metric = prometheus.MustNewConstMetric(desc, prometheus.UntypedValue, value, labels...) } metrics = append(metrics, metric) } } return metrics, nonfatalErrors, nil } // Iterate through all the namespace mappings in the exporter and run their // queries. func queryNamespaceMappings(ch chan<- prometheus.Metric, server *Server) map[string]error { // Return a map of namespace -> errors namespaceErrors := make(map[string]error) scrapeStart := time.Now() for namespace, mapping := range server.metricMap { log.Debugln("Querying namespace: ", namespace) if mapping.master && !server.master { log.Debugln("Query skipped...") continue } scrapeMetric := false // Check if the metric is cached server.cacheMtx.Lock() cachedMetric, found := server.metricCache[namespace] server.cacheMtx.Unlock() // If found, check if needs refresh from cache if found { if scrapeStart.Sub(cachedMetric.lastScrape).Seconds() > float64(mapping.cacheSeconds) { scrapeMetric = true } } else { scrapeMetric = true } var metrics []prometheus.Metric var nonFatalErrors []error var err error if scrapeMetric { metrics, nonFatalErrors, err = queryNamespaceMapping(server, namespace, mapping) } else { metrics = cachedMetric.metrics } // Serious error - a namespace disappeared if err != nil { namespaceErrors[namespace] = err log.Infoln(err) } // Non-serious errors - likely version or parsing problems. if len(nonFatalErrors) > 0 { for _, err := range nonFatalErrors { log.Infoln(err.Error()) } } // Emit the metrics into the channel for _, metric := range metrics { ch <- metric } if scrapeMetric { // Only cache if metric is meaningfully cacheable if mapping.cacheSeconds > 0 { server.cacheMtx.Lock() server.metricCache[namespace] = cachedMetrics{ metrics: metrics, lastScrape: scrapeStart, } server.cacheMtx.Unlock() } } } return namespaceErrors } // Check and update the exporters query maps if the version has changed. func (e *Exporter) checkMapVersions(ch chan<- prometheus.Metric, server *Server) error { log.Debugf("Querying Postgres Version on %q", server) versionRow := server.db.QueryRow("SELECT version();") var versionString string err := versionRow.Scan(&versionString) if err != nil { return fmt.Errorf("Error scanning version string on %q: %v", server, err) } semanticVersion, err := parseVersion(versionString) if err != nil { return fmt.Errorf("Error parsing version string on %q: %v", server, err) } if !e.disableDefaultMetrics && semanticVersion.LT(lowestSupportedVersion) { log.Warnf("PostgreSQL version is lower on %q then our lowest supported version! 
Got %s minimum supported is %s.", server, semanticVersion, lowestSupportedVersion) } // Check if semantic version changed and recalculate maps if needed. if semanticVersion.NE(server.lastMapVersion) || server.metricMap == nil { log.Infof("Semantic Version Changed on %q: %s -> %s", server, server.lastMapVersion, semanticVersion) server.mappingMtx.Lock() // Get Default Metrics only for master database if !e.disableDefaultMetrics && server.master { server.metricMap = makeDescMap(semanticVersion, server.labels, e.builtinMetricMaps) server.queryOverrides = makeQueryOverrideMap(semanticVersion, queryOverrides) } else { server.metricMap = make(map[string]MetricMapNamespace) server.queryOverrides = make(map[string]string) } server.lastMapVersion = semanticVersion if e.userQueriesPath != "" { // Clear the metric while a reload is happening e.userQueriesError.Reset() // Calculate the hashsum of the useQueries userQueriesData, err := ioutil.ReadFile(e.userQueriesPath) if err != nil { log.Errorln("Failed to reload user queries:", e.userQueriesPath, err) e.userQueriesError.WithLabelValues(e.userQueriesPath, "").Set(1) } else { hashsumStr := fmt.Sprintf("%x", sha256.Sum256(userQueriesData)) if err := addQueries(userQueriesData, semanticVersion, server); err != nil { log.Errorln("Failed to reload user queries:", e.userQueriesPath, err) e.userQueriesError.WithLabelValues(e.userQueriesPath, hashsumStr).Set(1) } else { // Mark user queries as successfully loaded e.userQueriesError.WithLabelValues(e.userQueriesPath, hashsumStr).Set(0) } } } server.mappingMtx.Unlock() } // Output the version as a special metric only for master database versionDesc := prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, staticLabelName), "Version string as reported by postgres", []string{"version", "short_version"}, server.labels) if !e.disableDefaultMetrics && server.master { ch <- prometheus.MustNewConstMetric(versionDesc, prometheus.UntypedValue, 1, versionString, semanticVersion.String()) } return nil } func (e *Exporter) scrape(ch chan<- prometheus.Metric) { defer func(begun time.Time) { e.duration.Set(time.Since(begun).Seconds()) }(time.Now()) e.totalScrapes.Inc() dsns := e.dsn if e.autoDiscoverDatabases { dsns = e.discoverDatabaseDSNs() } var errorsCount int var connectionErrorsCount int for _, dsn := range dsns { if err := e.scrapeDSN(ch, dsn); err != nil { errorsCount++ log.Errorf(err.Error()) if _, ok := err.(*ErrorConnectToServer); ok { connectionErrorsCount++ } } } switch { case connectionErrorsCount >= len(dsns): e.psqlUp.Set(0) default: e.psqlUp.Set(1) // Didn't fail, can mark connection as up for this scrape. 
} switch errorsCount { case 0: e.error.Set(0) default: e.error.Set(1) } } func (e *Exporter) discoverDatabaseDSNs() []string { dsns := make(map[string]struct{}) for _, dsn := range e.dsn { parsedDSN, err := url.Parse(dsn) if err != nil { log.Errorf("Unable to parse DSN (%s): %v", loggableDSN(dsn), err) continue } server, err := e.servers.GetServer(dsn) if err != nil { log.Errorf("Error opening connection to database (%s): %v", loggableDSN(dsn), err) continue } dsns[dsn] = struct{}{} // If autoDiscoverDatabases is true, set first dsn as master database (Default: false) server.master = true databaseNames, err := queryDatabases(server) if err != nil { log.Errorf("Error querying databases (%s): %v", loggableDSN(dsn), err) continue } for _, databaseName := range databaseNames { if contains(e.excludeDatabases, databaseName) { continue } parsedDSN.Path = databaseName dsns[parsedDSN.String()] = struct{}{} } } result := make([]string, len(dsns)) index := 0 for dsn := range dsns { result[index] = dsn index++ } return result } func (e *Exporter) scrapeDSN(ch chan<- prometheus.Metric, dsn string) error { server, err := e.servers.GetServer(dsn) if err != nil { return &ErrorConnectToServer{fmt.Sprintf("Error opening connection to database (%s): %s", loggableDSN(dsn), err.Error())} } // Check if autoDiscoverDatabases is false, set dsn as master database (Default: false) if !e.autoDiscoverDatabases { server.master = true } // Check if map versions need to be updated if err := e.checkMapVersions(ch, server); err != nil { log.Warnln("Proceeding with outdated query maps, as the Postgres version could not be determined:", err) } return server.Scrape(ch, e.disableSettingsMetrics) } // try to get the DataSource // DATA_SOURCE_NAME always wins so we do not break older versions // reading secrets from files wins over secrets in environment variables // DATA_SOURCE_NAME > DATA_SOURCE_{USER|PASS}_FILE > DATA_SOURCE_{USER|PASS} func getDataSources() []string { var dsn = os.Getenv("DATA_SOURCE_NAME") if len(dsn) == 0 { var user string var pass string var uri string if len(os.Getenv("DATA_SOURCE_USER_FILE")) != 0 { fileContents, err := ioutil.ReadFile(os.Getenv("DATA_SOURCE_USER_FILE")) if err != nil { panic(err) } user = strings.TrimSpace(string(fileContents)) } else { user = os.Getenv("DATA_SOURCE_USER") } if len(os.Getenv("DATA_SOURCE_PASS_FILE")) != 0 { fileContents, err := ioutil.ReadFile(os.Getenv("DATA_SOURCE_PASS_FILE")) if err != nil { panic(err) } pass = strings.TrimSpace(string(fileContents)) } else { pass = os.Getenv("DATA_SOURCE_PASS") } ui := url.UserPassword(user, pass).String() if len(os.Getenv("DATA_SOURCE_URI_FILE")) != 0 { fileContents, err := ioutil.ReadFile(os.Getenv("DATA_SOURCE_URI_FILE")) if err != nil { panic(err) } uri = strings.TrimSpace(string(fileContents)) } else { uri = os.Getenv("DATA_SOURCE_URI") } dsn = "postgresql://" + ui + "@" + uri return []string{dsn} } return strings.Split(dsn, ",") } func contains(a []string, x string) bool { for _, n := range a { if x == n { return true } } return false } func main() { kingpin.Version(fmt.Sprintf("postgres_exporter %s (built with %s)\n", Version, runtime.Version())) log.AddFlags(kingpin.CommandLine) kingpin.Parse() // landingPage contains the HTML served at '/'. // TODO: Make this nicer and more informative. 
var landingPage = []byte(`<html> <head><title>Postgres exporter</title></head> <body> <h1>Postgres exporter</h1> <p><a href='` + *metricPath + `'>Metrics</a></p> </body> </html> `) if *onlyDumpMaps { dumpMaps() return } dsn := getDataSources() if len(dsn) == 0 { log.Fatal("couldn't find environment variables describing the datasource to use") } exporter := NewExporter(dsn, DisableDefaultMetrics(*disableDefaultMetrics), DisableSettingsMetrics(*disableSettingsMetrics), AutoDiscoverDatabases(*autoDiscoverDatabases), WithUserQueriesPath(*queriesPath), WithConstantLabels(*constantLabelsList), ExcludeDatabases(*excludeDatabases), ) defer func() { exporter.servers.Close() }() // Setup build info metric. version.Branch = Branch version.BuildDate = BuildDate version.Revision = Revision version.Version = VersionShort prometheus.MustRegister(version.NewCollector("postgres_exporter")) prometheus.MustRegister(exporter) http.Handle(*metricPath, promhttp.Handler()) http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "text/html; charset=UTF-8") // nolint: errcheck w.Write(landingPage) // nolint: errcheck }) log.Infof("Starting Server: %s", *listenAddress) log.Fatal(http.ListenAndServe(*listenAddress, nil)) }
[ "\"DATA_SOURCE_NAME\"", "\"DATA_SOURCE_USER_FILE\"", "\"DATA_SOURCE_USER_FILE\"", "\"DATA_SOURCE_USER\"", "\"DATA_SOURCE_PASS_FILE\"", "\"DATA_SOURCE_PASS_FILE\"", "\"DATA_SOURCE_PASS\"", "\"DATA_SOURCE_URI_FILE\"", "\"DATA_SOURCE_URI_FILE\"", "\"DATA_SOURCE_URI\"" ]
[]
[ "DATA_SOURCE_PASS", "DATA_SOURCE_USER", "DATA_SOURCE_NAME", "DATA_SOURCE_PASS_FILE", "DATA_SOURCE_URI_FILE", "DATA_SOURCE_USER_FILE", "DATA_SOURCE_URI" ]
[]
["DATA_SOURCE_PASS", "DATA_SOURCE_USER", "DATA_SOURCE_NAME", "DATA_SOURCE_PASS_FILE", "DATA_SOURCE_URI_FILE", "DATA_SOURCE_USER_FILE", "DATA_SOURCE_URI"]
go
7
0
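The getDataSources function in the exporter above resolves its connection strings with the precedence DATA_SOURCE_NAME > DATA_SOURCE_{USER|PASS|URI}_FILE > DATA_SOURCE_{USER|PASS|URI}, matching the environment variables listed in this record's metadata. The Python sketch below mirrors that resolution order for illustration only; resolve_dsn is a made-up helper name, and urllib.parse.quote only approximates Go's url.UserPassword encoding.

import os
from urllib.parse import quote

def resolve_dsn():
    # DATA_SOURCE_NAME always wins and may hold a comma-separated list of DSNs.
    dsn = os.environ.get("DATA_SOURCE_NAME", "")
    if dsn:
        return dsn.split(",")

    def read(var):
        # A *_FILE variable (e.g. a mounted secret) takes priority over the plain variable.
        file_path = os.environ.get(var + "_FILE")
        if file_path:
            with open(file_path) as fh:
                return fh.read().strip()
        return os.environ.get(var, "")

    user = quote(read("DATA_SOURCE_USER"), safe="")
    password = quote(read("DATA_SOURCE_PASS"), safe="")
    uri = read("DATA_SOURCE_URI")  # host:port/dbname?options
    return ["postgresql://{}:{}@{}".format(user, password, uri)]

print(resolve_dsn())

This follows the same order the comment above getDataSources documents: secrets read from files win over plain environment variables, but never over an explicit DATA_SOURCE_NAME.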
vendor/github.com/casualjim/go-app/application.go
package app import ( "errors" "fmt" "log" "net/url" "os" "os/signal" "path/filepath" goruntime "runtime" "strings" "sync" "syscall" "github.com/casualjim/go-app/logging" "github.com/casualjim/go-app/tracing" cjm "github.com/casualjim/middlewares" "github.com/fsnotify/fsnotify" "github.com/kardianos/osext" "github.com/sirupsen/logrus" "github.com/spf13/viper" // we enable remote config providers by default _ "github.com/spf13/viper/remote" ) var ( // ErrModuleUnknown returned when no module can be found for the specified key ErrModuleUnknown error execName func() (string, error) // Version of the application Version string ) func init() { ErrModuleUnknown = errors.New("unknown module") execName = osext.Executable log.SetOutput(logrus.StandardLogger().Writer()) log.SetFlags(0) } // A Key represents a key for a module. // Users of this package can define their own keys, this is just the type definition. type Key string // Application is an application level context package // It can be used as a kind of dependency injection container type Application interface { // Add modules to the application context Add(...Module) error // Get the module at the specified key, thread-safe Get(Key) interface{} // Get the module at the specified key, thread-safe GetOK(Key) (interface{}, bool) // Set the module at the specified key, this should be safe across multiple threads Set(Key, interface{}) error // Logger gets the root logger for this application Logger() logrus.FieldLogger // NewLogger creates a new named logger for this application NewLogger(string, logrus.Fields) logrus.FieldLogger // Tracer returns the root Tracer() tracing.Tracer // Config returns the viper config for this application Config() *viper.Viper // Info returns the app info object for this application Info() cjm.AppInfo // Init the application and its modules with the config. Init() error // Start the application an its enabled modules Start() error // Stop the application an its enabled modules Stop() error } func addDefaultConfigPaths(v *viper.Viper, name string) { v.SetConfigName("config") norm := strings.ToLower(name) paths := filepath.Join(os.Getenv("HOME"), ".config", norm) + ":" + filepath.Join("/etc", norm) + ":etc:." if os.Getenv("CONFIG_PATH") != "" { paths = os.Getenv("CONFIG_PATH") } for _, path := range filepath.SplitList(paths) { v.AddConfigPath(path) } } var viperLock *sync.Mutex func init() { viperLock = new(sync.Mutex) } func createViper(name string, configPath string) (*viper.Viper, error) { viperLock.Lock() defer viperLock.Unlock() v := viper.New() if configPath == "" { addDefaultConfigPaths(v, name) } else { if _, err := os.Stat(configPath); os.IsNotExist(err) { return nil, fmt.Errorf("No config file found at %s", configPath) } dir, fname := filepath.Split(configPath) // viper wants the file name without extention... 
suffixes := []string{".json", ".yml", ".yaml", ".hcl", ".toml"} for _, suffix := range suffixes { if strings.HasSuffix(fname, suffix) { fname = strings.Split(fname, suffix)[0] break } } v.SetConfigName(fname) v.AddConfigPath(dir) } if err := addViperRemoteConfig(v); err != nil { return nil, err } if err := v.ReadInConfig(); err != nil { if _, ok := err.(viper.UnsupportedConfigError); !ok && v.ConfigFileUsed() != "" { return nil, err } } v.SetEnvPrefix(name) v.AutomaticEnv() addViperDefaults(v) return v, nil } func addViperRemoteConfig(v *viper.Viper) error { // check if encryption is required CONFIG_KEYRING keyring := os.Getenv("CONFIG_KEYRING") // check for etcd env vars CONFIG_REMOTE_URL, eg: // etcd://localhost:2379/[app-name]/config.[type] // consul://localhost:8500/[app-name]/config.[type] remURL := os.Getenv("CONFIG_REMOTE_URL") if remURL == "" { return nil } var provider, host, path, tpe string u, err := url.Parse(remURL) if err != nil { return err } provider = strings.ToLower(u.Scheme) host = u.Host if provider == "etcd" { host = "http://" + host } path = u.Path tpe = strings.ToLower(strings.TrimLeft(filepath.Ext(path), ".")) if tpe == "" { tpe = "json" } v.SetConfigType(tpe) if keyring != "" { if err := v.AddSecureRemoteProvider(provider, host, path, keyring); err != nil { return err } } else { if err := v.AddRemoteProvider(provider, host, path); err != nil { return err } } if err := v.ReadRemoteConfig(); err != nil { return fmt.Errorf("config is invalid as %s", tpe) } return nil } func addViperDefaults(v *viper.Viper) { v.SetDefault("tracer", map[interface{}]interface{}{"enable": true}) v.SetDefault("logging", map[interface{}]interface{}{"root": map[interface{}]interface{}{"level": "info"}}) } func ensureDefaults(name string) (string, string, error) { // configure version defaults version := "dev" if Version != "" { version = Version } // configure name defaults if name == "" { name = os.Getenv("APP_NAME") if name == "" { exe, err := execName() if err != nil { return "", "", err } name = filepath.Base(exe) } } return name, version, nil } func newWithCallback(nme string, configPath string, reload func(fsnotify.Event)) (Application, error) { name, version, err := ensureDefaults(nme) if err != nil { return nil, err } appInfo := cjm.AppInfo{ Name: name, BasePath: "/", Version: version, Pid: os.Getpid(), } cfg, err := createViper(name, configPath) if err != nil { return nil, err } allLoggers := logging.NewRegistry(cfg, logrus.Fields{"app": appInfo.Name}) log.SetOutput(allLoggers.Writer()) tracer := allLoggers.Root().WithField("module", "trace") trace := tracing.New("", tracer, nil) go func() { sigs := make(chan os.Signal, 1) signal.Notify(sigs, syscall.SIGQUIT) buf := make([]byte, 1<<20) for { <-sigs ln := goruntime.Stack(buf, true) allLoggers.Root().Println(string(buf[:ln])) } }() app := &defaultApplication{ appInfo: appInfo, allLoggers: allLoggers, rootTracer: trace, config: cfg, registry: make(map[Key]interface{}, 100), regLock: new(sync.Mutex), } app.watchConfigurations(func(in fsnotify.Event) { if reload != nil { reload(in) } allLoggers.Reload() for _, mod := range app.modules { if err := mod.Reload(app); err != nil { allLoggers.Root().Errorf("reload config: %v", err) } } allLoggers.Root().Infoln("config file changed:", in.Name) }) return app, nil } // New application with the specified name, at the specified basepath func New(nme string) (Application, error) { return newWithCallback(nme, "", nil) } // NewWithConfig application with the specified name, with a specific config file 
path func NewWithConfig(nme string, configPath string) (Application, error) { return newWithCallback(nme, configPath, nil) } type defaultApplication struct { appInfo cjm.AppInfo allLoggers *logging.Registry rootTracer tracing.Tracer config *viper.Viper modules []Module registry map[Key]interface{} regLock *sync.Mutex } func (d *defaultApplication) watchConfigurations(reload func(fsnotify.Event)) { viperLock.Lock() defer viperLock.Unlock() d.config.WatchConfig() d.config.OnConfigChange(reload) // we made it this far, it's clear the url means we're also connecting remotely if os.Getenv("CONFIG_REMOTE_URL") != "" { go func() { for { err := d.config.WatchRemoteConfig() if err != nil { d.Logger().Errorf("watching remote config: %v", err) continue } reload(fsnotify.Event{Name: os.Getenv("CONFIG_REMOTE_URL"), Op: fsnotify.Write}) } }() } } func (d *defaultApplication) Add(modules ...Module) error { if len(modules) > 0 { d.modules = append(d.modules, modules...) } return nil } // Get the module at the specified key, return nil when the component doesn't exist func (d *defaultApplication) Get(key Key) interface{} { mod, _ := d.GetOK(key) return mod } // Get the module at the specified key, return false when the component doesn't exist func (d *defaultApplication) GetOK(key Key) (interface{}, bool) { d.regLock.Lock() defer d.regLock.Unlock() mod, ok := d.registry[key] if !ok { return nil, ok } return mod, ok } func (d *defaultApplication) Set(key Key, module interface{}) error { d.regLock.Lock() d.registry[key] = module d.regLock.Unlock() return nil } func (d *defaultApplication) Logger() logrus.FieldLogger { return d.allLoggers.Root() } func (d *defaultApplication) NewLogger(name string, ctx logrus.Fields) logrus.FieldLogger { return d.allLoggers.Root().New(name, ctx) } func (d *defaultApplication) Tracer() tracing.Tracer { return d.rootTracer } func (d *defaultApplication) Config() *viper.Viper { return d.config } func (d *defaultApplication) Info() cjm.AppInfo { return d.appInfo } func (d *defaultApplication) Init() error { for _, mod := range d.modules { if err := mod.Init(d); err != nil { return err } } return nil } func (d *defaultApplication) Start() error { for _, mod := range d.modules { if err := mod.Start(d); err != nil { return err } } return nil } func (d *defaultApplication) Stop() error { for _, mod := range d.modules { if err := mod.Stop(d); err != nil { return err } } return nil }
[ "\"HOME\"", "\"CONFIG_PATH\"", "\"CONFIG_PATH\"", "\"CONFIG_KEYRING\"", "\"CONFIG_REMOTE_URL\"", "\"APP_NAME\"", "\"CONFIG_REMOTE_URL\"", "\"CONFIG_REMOTE_URL\"" ]
[]
[ "CONFIG_PATH", "CONFIG_REMOTE_URL", "CONFIG_KEYRING", "APP_NAME", "HOME" ]
[]
["CONFIG_PATH", "CONFIG_REMOTE_URL", "CONFIG_KEYRING", "APP_NAME", "HOME"]
go
5
0
libbeat/processors/add_kubernetes_metadata/kubernetes.go
package add_kubernetes_metadata import ( "context" "errors" "fmt" "io/ioutil" "os" "time" "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/libbeat/processors" "github.com/ericchiang/k8s" "github.com/ghodss/yaml" ) const ( timeout = time.Second * 5 ) var ( fatalError = errors.New("Unable to create kubernetes processor") ) type kubernetesAnnotator struct { podWatcher *PodWatcher matchers *Matchers } func init() { processors.RegisterPlugin("add_kubernetes_metadata", newKubernetesAnnotator) // Register default indexers Indexing.AddIndexer(PodNameIndexerName, NewPodNameIndexer) Indexing.AddIndexer(ContainerIndexerName, NewContainerIndexer) Indexing.AddMatcher(FieldMatcherName, NewFieldMatcher) } func newKubernetesAnnotator(cfg common.Config) (processors.Processor, error) { logp.Beta("The kubernetes processor is beta") config := defaultKuberentesAnnotatorConfig() err := cfg.Unpack(&config) if err != nil { return nil, fmt.Errorf("fail to unpack the kubernetes configuration: %s", err) } err = validate(config) if err != nil { return nil, err } //Load default indexer configs if config.DefaultIndexers.Enabled == true { Indexing.RLock() for key, cfg := range Indexing.defaultIndexerConfigs { config.Indexers = append(config.Indexers, map[string]common.Config{key: cfg}) } Indexing.RUnlock() } //Load default matcher configs if config.DefaultMatchers.Enabled == true { Indexing.RLock() for key, cfg := range Indexing.defaultMatcherConfigs { config.Matchers = append(config.Matchers, map[string]common.Config{key: cfg}) } Indexing.RUnlock() } metaGen := &GenDefaultMeta{ labels: config.IncludeLabels, annotations: config.IncludeAnnotations, } indexers := Indexers{ indexers: []Indexer{}, } //Create all configured indexers for _, pluginConfigs := range config.Indexers { for name, pluginConfig := range pluginConfigs { indexFunc := Indexing.GetIndexer(name) if indexFunc == nil { logp.Warn("Unable to find indexing plugin %s", name) continue } indexer, err := indexFunc(pluginConfig, metaGen) if err != nil { logp.Warn("Unable to initialize indexing plugin %s due to error %v", name, err) } indexers.indexers = append(indexers.indexers, indexer) } } matchers := Matchers{ matchers: []Matcher{}, } //Create all configured matchers for _, pluginConfigs := range config.Matchers { for name, pluginConfig := range pluginConfigs { matchFunc := Indexing.GetMatcher(name) if matchFunc == nil { logp.Warn("Unable to find matcher plugin %s", name) } matcher, err := matchFunc(pluginConfig) if err != nil { logp.Warn("Unable to initialize matcher plugin %s due to error %v", name, err) } matchers.matchers = append(matchers.matchers, matcher) } } if len(matchers.matchers) == 0 { return nil, fmt.Errorf("Can not initialize kubernetes plugin with zero matcher plugins") } var client *k8s.Client if config.InCluster == true { client, err = k8s.NewInClusterClient() if err != nil { return nil, fmt.Errorf("Unable to get in cluster configuration") } } else { data, err := ioutil.ReadFile(config.KubeConfig) if err != nil { return nil, fmt.Errorf("read kubeconfig: %v", err) } // Unmarshal YAML into a Kubernetes config object. 
var config k8s.Config if err = yaml.Unmarshal(data, &config); err != nil { return nil, fmt.Errorf("unmarshal kubeconfig: %v", err) } client, err = k8s.NewClient(&config) if err != nil { return nil, err } } ctx := context.Background() if config.Host == "" { podName := os.Getenv("HOSTNAME") logp.Info("Using pod name %s and namespace %s", podName, config.Namespace) if podName == "localhost" { config.Host = "localhost" } else { pod, error := client.CoreV1().GetPod(ctx, podName, config.Namespace) if error != nil { logp.Err("Querying for pod failed with error: ", error.Error()) logp.Info("Unable to find pod, setting host to localhost") config.Host = "localhost" } else { config.Host = pod.Spec.GetNodeName() } } } logp.Debug("kubernetes", "Using host ", config.Host) logp.Debug("kubernetes", "Initializing watcher") if client != nil { watcher := NewPodWatcher(client, &indexers, config.SyncPeriod, config.Host) if watcher.Run() { return kubernetesAnnotator{podWatcher: watcher, matchers: &matchers}, nil } return nil, fatalError } return nil, fatalError } func (k kubernetesAnnotator) Run(event common.MapStr) (common.MapStr, error) { index := k.matchers.MetadataIndex(event) if index == "" { return event, nil } metadata := k.podWatcher.GetMetaData(index) if metadata == nil { return event, nil } meta := common.MapStr{} metaIface, ok := event["kubernetes"] if !ok { event["kubernetes"] = common.MapStr{} } else { meta = metaIface.(common.MapStr) } meta.Update(metadata) event["kubernetes"] = meta return event, nil } func (k kubernetesAnnotator) String() string { return "add_kubernetes_metadata" } func validate(config kubeAnnotatorConfig) error { if !config.InCluster && config.KubeConfig == "" { return errors.New("`kube_config` path can't be empty when in_cluster is set to false") } return nil }
[ "\"HOSTNAME\"" ]
[]
[ "HOSTNAME" ]
[]
["HOSTNAME"]
go
1
0
main.go
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package main import ( "flag" "fmt" "os" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/healthz" "github.com/SAP/sap-btp-service-operator/api/v1/webhooks" "sigs.k8s.io/controller-runtime/pkg/webhook" "github.com/SAP/sap-btp-service-operator/internal/secrets" logf "sigs.k8s.io/controller-runtime/pkg/log" "github.com/SAP/sap-btp-service-operator/internal/config" "k8s.io/apimachinery/pkg/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/log/zap" servicesv1 "github.com/SAP/sap-btp-service-operator/api/v1" "github.com/SAP/sap-btp-service-operator/controllers" // +kubebuilder:scaffold:imports ) var ( scheme = runtime.NewScheme() setupLog = ctrl.Log.WithName("setup") ) func init() { _ = clientgoscheme.AddToScheme(scheme) _ = servicesv1.AddToScheme(scheme) // +kubebuilder:scaffold:scheme } func main() { var metricsAddr string var enableLeaderElection bool var probeAddr string flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.") flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoints bind to.") flag.BoolVar(&enableLeaderElection, "enable-leader-election", false, "Enable leader election for controller manager. 
"+ "Enabling this will ensure there is only one active controller manager.") flag.Parse() ctrl.SetLogger(zap.New(zap.UseDevMode(true))) mgrOptions := ctrl.Options{ Scheme: scheme, MetricsBindAddress: metricsAddr, Port: 9443, HealthProbeBindAddress: probeAddr, LeaderElection: enableLeaderElection, LeaderElectionID: "aa689ecc.cloud.sap.com", } if !config.Get().AllowClusterAccess { allowedNamespaces := config.Get().AllowedNamespaces allowedNamespaces = append(allowedNamespaces, config.Get().ReleaseNamespace) setupLog.Info(fmt.Sprintf("Allowed namespaces are %v", allowedNamespaces)) mgrOptions.NewCache = cache.MultiNamespacedCacheBuilder(allowedNamespaces) } mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), mgrOptions) if err != nil { setupLog.Error(err, "unable to start manager") os.Exit(1) } secretResolver := &secrets.SecretResolver{ ManagementNamespace: config.Get().ManagementNamespace, ReleaseNamespace: config.Get().ReleaseNamespace, EnableNamespaceSecrets: config.Get().EnableNamespaceSecrets, Client: mgr.GetClient(), Log: logf.Log.WithName("secret-resolver"), } if err = (&controllers.ServiceInstanceReconciler{ BaseReconciler: &controllers.BaseReconciler{ Client: mgr.GetClient(), Log: ctrl.Log.WithName("controllers").WithName("ServiceInstance"), Scheme: mgr.GetScheme(), Config: config.Get(), SecretResolver: secretResolver, Recorder: mgr.GetEventRecorderFor("ServiceInstance"), }, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "ServiceInstance") os.Exit(1) } if err = (&controllers.ServiceBindingReconciler{ BaseReconciler: &controllers.BaseReconciler{ Client: mgr.GetClient(), Log: ctrl.Log.WithName("controllers").WithName("ServiceBinding"), Scheme: mgr.GetScheme(), Config: config.Get(), SecretResolver: secretResolver, Recorder: mgr.GetEventRecorderFor("ServiceBinding"), }, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "ServiceBinding") os.Exit(1) } if os.Getenv("ENABLE_WEBHOOKS") != "false" { mgr.GetWebhookServer().Register("/mutate-services-cloud-sap-com-v1-serviceinstance", &webhook.Admission{Handler: &webhooks.ServiceInstanceDefaulter{}}) mgr.GetWebhookServer().Register("/mutate-services-cloud-sap-com-v1-servicebinding", &webhook.Admission{Handler: &webhooks.ServiceBindingDefaulter{}}) if err = (&servicesv1.ServiceBinding{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "ServiceBinding") os.Exit(1) } } // +kubebuilder:scaffold:builder if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { setupLog.Error(err, "unable to set up health check") os.Exit(1) } if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { setupLog.Error(err, "unable to set up ready check") os.Exit(1) } setupLog.Info("starting manager") if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { setupLog.Error(err, "problem running manager") os.Exit(1) } }
[ "\"ENABLE_WEBHOOKS\"" ]
[]
[ "ENABLE_WEBHOOKS" ]
[]
["ENABLE_WEBHOOKS"]
go
1
0
src/terraform-resource/out/out.go
package out import ( "errors" "fmt" "io" "io/ioutil" "os" "path" "terraform-resource/logger" "terraform-resource/models" "terraform-resource/namer" "terraform-resource/ssh" "terraform-resource/storage" "terraform-resource/terraform" ) type Runner struct { SourceDir string Namer namer.Namer LogWriter io.Writer } func (r Runner) Run(req models.OutRequest) (models.OutResponse, error) { if err := req.Source.Validate(); err != nil { return models.OutResponse{}, err } req.Source.Terraform = req.Source.Terraform.Merge(req.Params.Terraform) terraformModel, err := r.buildTerraformModel(req) if err != nil { return models.OutResponse{}, err } if terraformModel.PrivateKey != "" { agent, err := ssh.SpawnAgent() if err != nil { return models.OutResponse{}, err } defer agent.Shutdown() if err = agent.AddKey([]byte(terraformModel.PrivateKey)); err != nil { return models.OutResponse{}, err } if err = os.Setenv("SSH_AUTH_SOCK", agent.SSHAuthSock()); err != nil { return models.OutResponse{}, err } } if req.Source.BackendType != "" && req.Source.MigratedFromStorage != (storage.Model{}) { return r.runWithMigratedFromStorage(req, terraformModel) } else if req.Source.BackendType == "" { return r.runWithLegacyStorage(req, terraformModel) } return r.runWithBackend(req, terraformModel) } func (r Runner) runWithBackend(req models.OutRequest, terraformModel models.Terraform) (models.OutResponse, error) { tmpDir, err := ioutil.TempDir(os.TempDir(), "terraform-resource-out") if err != nil { return models.OutResponse{}, fmt.Errorf("Failed to create tmp dir at '%s'", os.TempDir()) } defer os.RemoveAll(tmpDir) envName, err := r.buildEnvName(req, terraformModel) if err != nil { return models.OutResponse{}, fmt.Errorf("Failed to create env name: %s", err) } terraformModel.Vars["env_name"] = envName terraformModel.PlanFileLocalPath = path.Join(tmpDir, "plan") client := terraform.NewClient( terraformModel, r.LogWriter, ) action := terraform.Action{ Client: client, EnvName: envName, Model: terraformModel, Logger: logger.Logger{ Sink: r.LogWriter, }, } var result terraform.Result var actionErr error if req.Params.PlanOnly { result, actionErr = action.Plan() } else if req.Params.Action == models.DestroyAction { result, actionErr = action.Destroy() } else { result, actionErr = action.Apply() } if actionErr != nil { return models.OutResponse{}, actionErr } version := result.Version if req.Params.PlanOnly { version.PlanOnly = "true" // Concourse demands version fields are strings } metadata, err := r.buildMetadata(result.SanitizedOutput(), client) if err != nil { return models.OutResponse{}, actionErr } resp := models.OutResponse{ Version: version, Metadata: metadata, } return resp, nil } func (r Runner) runWithLegacyStorage(req models.OutRequest, terraformModel models.Terraform) (models.OutResponse, error) { logger := logger.Logger{ Sink: r.LogWriter, } logger.Warn(fmt.Sprintf("%s\n", storage.DeprecationWarning)) tmpDir, err := ioutil.TempDir(os.TempDir(), "terraform-resource-out") if err != nil { return models.OutResponse{}, fmt.Errorf("Failed to create tmp dir at '%s'", os.TempDir()) } defer os.RemoveAll(tmpDir) storageModel := req.Source.Storage if err = storageModel.Validate(); err != nil { return models.OutResponse{}, fmt.Errorf("Failed to validate storage Model: %s", err) } storageDriver := storage.BuildDriver(storageModel) envName, err := r.buildEnvNameFromLegacyStorage(req, storageDriver) if err != nil { return models.OutResponse{}, err } terraformModel.Vars["env_name"] = envName terraformModel.PlanFileLocalPath = 
path.Join(tmpDir, "plan") terraformModel.PlanFileRemotePath = fmt.Sprintf("%s.plan", envName) terraformModel.StateFileLocalPath = path.Join(tmpDir, "terraform.tfstate") terraformModel.StateFileRemotePath = fmt.Sprintf("%s.tfstate", envName) client := terraform.NewClient( terraformModel, r.LogWriter, ) stateFile := storage.StateFile{ LocalPath: terraformModel.StateFileLocalPath, RemotePath: terraformModel.StateFileRemotePath, StorageDriver: storageDriver, } planFile := storage.PlanFile{ LocalPath: terraformModel.PlanFileLocalPath, RemotePath: terraformModel.PlanFileRemotePath, StorageDriver: storageDriver, } action := terraform.LegacyStorageAction{ Client: client, StateFile: stateFile, PlanFile: planFile, Model: terraformModel, Logger: logger, } var result terraform.LegacyStorageResult var actionErr error if req.Params.PlanOnly { result, actionErr = action.Plan() } else if req.Params.Action == models.DestroyAction { result, actionErr = action.Destroy() } else { result, actionErr = action.Apply() } if actionErr != nil { return models.OutResponse{}, actionErr } version := models.NewVersionFromLegacyStorage(result.Version) if req.Params.PlanOnly { version.PlanOnly = "true" // Concourse demands version fields are strings } metadata, err := r.buildMetadata(result.SanitizedOutput(), client) if err != nil { return models.OutResponse{}, actionErr } resp := models.OutResponse{ Version: version, Metadata: metadata, } return resp, nil } func (r Runner) runWithMigratedFromStorage(req models.OutRequest, terraformModel models.Terraform) (models.OutResponse, error) { tmpDir, err := ioutil.TempDir(os.TempDir(), "terraform-resource-out") if err != nil { return models.OutResponse{}, fmt.Errorf("Failed to create tmp dir at '%s'", os.TempDir()) } defer os.RemoveAll(tmpDir) storageModel := req.Source.MigratedFromStorage if err = storageModel.Validate(); err != nil { return models.OutResponse{}, fmt.Errorf("Failed to validate storage Model: %s", err) } storageDriver := storage.BuildDriver(storageModel) envName, err := r.buildEnvNameFromMigrated(req, terraformModel, storageDriver) if err != nil { return models.OutResponse{}, err } terraformModel.Vars["env_name"] = envName terraformModel.PlanFileLocalPath = path.Join(tmpDir, "plan") client := terraform.NewClient( terraformModel, r.LogWriter, ) terraformModel.StateFileLocalPath = path.Join(tmpDir, "terraform.tfstate") terraformModel.StateFileRemotePath = fmt.Sprintf("%s.tfstate", envName) stateFile := storage.StateFile{ LocalPath: terraformModel.StateFileLocalPath, RemotePath: terraformModel.StateFileRemotePath, StorageDriver: storageDriver, } action := terraform.MigratedFromStorageAction{ StateFile: stateFile, Client: client, EnvName: envName, Model: terraformModel, Logger: logger.Logger{ Sink: r.LogWriter, }, } var result terraform.Result var actionErr error if req.Params.PlanOnly { result, actionErr = action.Plan() } else if req.Params.Action == models.DestroyAction { result, actionErr = action.Destroy() } else { result, actionErr = action.Apply() } if actionErr != nil { return models.OutResponse{}, actionErr } version := result.Version if req.Params.PlanOnly { version.PlanOnly = "true" // Concourse demands version fields are strings } metadata, err := r.buildMetadata(result.SanitizedOutput(), client) if err != nil { return models.OutResponse{}, actionErr } resp := models.OutResponse{ Version: version, Metadata: metadata, } return resp, nil } func (r Runner) buildEnvName(req models.OutRequest, terraformModel models.Terraform) (string, error) { 
tfClientWithoutWorkspace := terraform.NewClient( terraformModel, r.LogWriter, ) namer := BackendEnvNamer{ Req: req, TerraformClient: tfClientWithoutWorkspace, Namer: r.Namer, } return namer.EnvName() } func (r Runner) buildEnvNameFromLegacyStorage(req models.OutRequest, storageDriver storage.Storage) (string, error) { namer := LegacyStorageEnvNamer{ Req: req, StorageDriver: storageDriver, Namer: r.Namer, } return namer.EnvName() } func (r Runner) buildEnvNameFromMigrated(req models.OutRequest, terraformModel models.Terraform, storageDriver storage.Storage) (string, error) { tfClientWithoutWorkspace := terraform.NewClient( terraformModel, r.LogWriter, ) namer := MigratedFromStorageEnvNamer{ Req: req, StorageDriver: storageDriver, Namer: r.Namer, TerraformClient: tfClientWithoutWorkspace, } return namer.EnvName() } func (r Runner) buildTerraformModel(req models.OutRequest) (models.Terraform, error) { terraformModel := req.Source.Terraform if terraformModel.VarFiles != nil { for i := range terraformModel.VarFiles { terraformModel.VarFiles[i] = path.Join(r.SourceDir, terraformModel.VarFiles[i]) } } if err := terraformModel.ParseVarsFromFiles(); err != nil { return models.Terraform{}, fmt.Errorf("Failed to parse `terraform.var_files`: %s", err) } if err := terraformModel.ParseImportsFromFile(); err != nil { return models.Terraform{}, fmt.Errorf("Failed to parse `terraform.imports_file`: %s", err) } if len(terraformModel.Source) == 0 { return models.Terraform{}, errors.New("Missing required field `terraform.source`") } terraformModel.Vars["build_id"] = os.Getenv("BUILD_ID") terraformModel.Vars["build_name"] = os.Getenv("BUILD_NAME") terraformModel.Vars["build_job_name"] = os.Getenv("BUILD_JOB_NAME") terraformModel.Vars["build_pipeline_name"] = os.Getenv("BUILD_PIPELINE_NAME") terraformModel.Vars["build_team_name"] = os.Getenv("BUILD_TEAM_NAME") terraformModel.Vars["atc_external_url"] = os.Getenv("ATC_EXTERNAL_URL") return terraformModel, nil } func (r Runner) buildMetadata(outputs map[string]string, client terraform.Client) ([]models.MetadataField, error) { metadata := []models.MetadataField{} for key, value := range outputs { metadata = append(metadata, models.MetadataField{ Name: key, Value: value, }) } tfVersion, err := client.Version() if err != nil { return nil, err } return append(metadata, models.MetadataField{ Name: "terraform_version", Value: tfVersion, }), nil }
[ "\"BUILD_ID\"", "\"BUILD_NAME\"", "\"BUILD_JOB_NAME\"", "\"BUILD_PIPELINE_NAME\"", "\"BUILD_TEAM_NAME\"", "\"ATC_EXTERNAL_URL\"" ]
[]
[ "BUILD_NAME", "BUILD_JOB_NAME", "BUILD_TEAM_NAME", "ATC_EXTERNAL_URL", "BUILD_ID", "BUILD_PIPELINE_NAME" ]
[]
["BUILD_NAME", "BUILD_JOB_NAME", "BUILD_TEAM_NAME", "ATC_EXTERNAL_URL", "BUILD_ID", "BUILD_PIPELINE_NAME"]
go
6
0
scaper_concept/venv/lib/python3.9/site-packages/_distutils_hack/__init__.py
import sys import os import re import importlib import warnings is_pypy = '__pypy__' in sys.builtin_module_names warnings.filterwarnings('ignore', r'.+ distutils\b.+ deprecated', DeprecationWarning) def warn_distutils_present(): if 'distutils' not in sys.modules: return if is_pypy and sys.version_info < (3, 7): # PyPy for 3.6 unconditionally imports distutils, so bypass the warning # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250 return warnings.warn( "Distutils was imported before Setuptools, but importing Setuptools " "also replaces the `distutils` module in `sys.modules`. This may lead " "to undesirable behaviors or errors. To avoid these issues, avoid " "using distutils directly, ensure that setuptools is installed in the " "traditional way (e.g. not an editable install), and/or make sure " "that setuptools is always imported before distutils.") def clear_distutils(): if 'distutils' not in sys.modules: return warnings.warn("Setuptools is replacing distutils.") mods = [name for name in sys.modules if re.match(r'distutils\b', name)] for name in mods: del sys.modules[name] def enabled(): """ Allow selection of distutils by environment variable. """ which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib') return which == 'local' def ensure_local_distutils(): clear_distutils() # With the DistutilsMetaFinder in place, # perform an import to cause distutils to be # loaded from setuptools._distutils. Ref #2906. add_shim() importlib.import_module('distutils') remove_shim() # check that submodules load as expected core = importlib.import_module('distutils.core') assert '_distutils' in core.__file__, core.__file__ def do_override(): """ Ensure that the local copy of distutils is preferred over stdlib. See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401 for more motivation. """ if enabled(): warn_distutils_present() ensure_local_distutils() class DistutilsMetaFinder: def find_spec(self, fullname, path, target=None): if path is not None: return method_name = 'spec_for_{fullname}'.format(**locals()) method = getattr(self, method_name, lambda: None) return method() def spec_for_distutils(self): import importlib.abc import importlib.util class DistutilsLoader(importlib.abc.Loader): def create_module(self, spec): return importlib.import_module('setuptools._distutils') def exec_module(self, module): pass return importlib.util.spec_from_loader('distutils', DistutilsLoader()) def spec_for_pip(self): """ Ensure stdlib distutils when running under pip. See pypa/pip#8761 for rationale. """ if self.pip_imported_during_build(): return clear_distutils() self.spec_for_distutils = lambda: None @staticmethod def pip_imported_during_build(): """ Detect if pip is being imported in a build script. Ref #2355. """ import traceback return any( frame.f_globals['__file__'].endswith('setup.py') for frame, line in traceback.walk_stack(None) ) DISTUTILS_FINDER = DistutilsMetaFinder() def add_shim(): sys.meta_path.insert(0, DISTUTILS_FINDER) def remove_shim(): try: sys.meta_path.remove(DISTUTILS_FINDER) except ValueError: pass
[]
[]
[ "SETUPTOOLS_USE_DISTUTILS" ]
[]
["SETUPTOOLS_USE_DISTUTILS"]
python
1
0
stackstate_checks_dev/tests/tooling/commands/test_create.py
# (C) Datadog, Inc. 2018 # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) import os import sys from stackstate_checks.dev import EnvVars, run_command from stackstate_checks.dev.utils import chdir, remove_path from stackstate_checks.dev._env import TESTING_PLUGIN, E2E_PREFIX HERE = os.path.dirname(os.path.abspath(__file__)) CORE_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(HERE)))) def test_new_check_test(): check_path = os.path.join(CORE_ROOT, 'my_check') try: run_command( [sys.executable, '-m', 'stackstate_checks.dev', 'create', '-ni', '-q', '-l', CORE_ROOT, 'my-check'], capture=True, check=True ) run_command( [sys.executable, '-m', 'pip', 'install', check_path], capture=True, check=True ) with chdir(check_path): ignored_env_vars = [TESTING_PLUGIN, 'PYTEST_ADDOPTS'] ignored_env_vars.extend(ev for ev in os.environ if ev.startswith(E2E_PREFIX)) with EnvVars(ignore=ignored_env_vars): run_command([sys.executable, '-m', 'pytest'], capture=True, check=True) run_command( [sys.executable, '-m', 'pip', 'uninstall', '-y', 'my-check'], capture=True, check=True ) finally: remove_path(check_path)
[]
[]
[]
[]
[]
python
0
0
scripts/test_globalF.py
import argparse import os import time import pickle import numpy as np import torch from torch.utils.model_zoo import load_url from torchvision import transforms from cirtorch.models.GF_net import init_network, extract_vectors from cirtorch.datasets.datahelpers import cid2filename from cirtorch.datasets.testdataset import configdataset from cirtorch.utils.download import download_train, download_test from cirtorch.utils.whiten import whitenlearn, whitenapply from cirtorch.utils.evaluate import compute_map_and_print from cirtorch.utils.general import get_data_root, htime PRETRAINED = { 'retrievalSfM120k-vgg16-gem' : 'http://cmp.felk.cvut.cz/cnnimageretrieval/data/networks/retrieval-SfM-120k/retrievalSfM120k-vgg16-gem-b4dcdc6.pth', 'retrievalSfM120k-resnet101-gem' : 'http://cmp.felk.cvut.cz/cnnimageretrieval/data/networks/retrieval-SfM-120k/retrievalSfM120k-resnet101-gem-b80fb85.pth', # new modules with whitening learned end-to-end 'rSfM120k-tl-resnet50-gem-w' : 'http://cmp.felk.cvut.cz/cnnimageretrieval/data/networks/retrieval-SfM-120k/rSfM120k-tl-resnet50-gem-w-97bf910.pth', 'rSfM120k-tl-resnet101-gem-w' : 'http://cmp.felk.cvut.cz/cnnimageretrieval/data/networks/retrieval-SfM-120k/rSfM120k-tl-resnet101-gem-w-a155e54.pth', 'rSfM120k-tl-resnet152-gem-w' : 'http://cmp.felk.cvut.cz/cnnimageretrieval/data/networks/retrieval-SfM-120k/rSfM120k-tl-resnet152-gem-w-f39cada.pth', 'gl18-tl-resnet50-gem-w' : 'http://cmp.felk.cvut.cz/cnnimageretrieval/data/networks/gl18/gl18-tl-resnet50-gem-w-83fdc30.pth', 'gl18-tl-resnet101-gem-w' : 'http://cmp.felk.cvut.cz/cnnimageretrieval/data/networks/gl18/gl18-tl-resnet101-gem-w-a4d43db.pth', 'gl18-tl-resnet152-gem-w' : 'http://cmp.felk.cvut.cz/cnnimageretrieval/data/networks/gl18/gl18-tl-resnet152-gem-w-21278d5.pth', } datasets_names = ['oxford5k', 'paris6k', 'roxford5k', 'rparis6k'] whitening_names = ['retrieval-SfM-30k', 'retrieval-SfM-120k'] parser = argparse.ArgumentParser(description='PyTorch CNN Image Retrieval Testing') # network group = parser.add_mutually_exclusive_group(required=True) group.add_argument('--network-path', '-npath', metavar='NETWORK', help="pretrained network or network path (destination where network is saved)") group.add_argument('--network-offtheshelf', '-noff', metavar='NETWORK', help="off-the-shelf network, in the format 'ARCHITECTURE-POOLING' or 'ARCHITECTURE-POOLING-{reg-lwhiten-whiten}'," + " examples: 'resnet101-gem' | 'resnet101-gem-reg' | 'resnet101-gem-whiten' | 'resnet101-gem-lwhiten' | 'resnet101-gem-reg-whiten'") # test options parser.add_argument('--datasets', '-d', metavar='DATASETS', default='oxford5k,paris6k', help="comma separated list of test datasets: " + " | ".join(datasets_names) + " (default: 'oxford5k,paris6k')") parser.add_argument('--image-size', '-imsize', default=1024, type=int, metavar='N', help="maximum size of longer image side used for testing (default: 1024)") parser.add_argument('--multiscale', '-ms', metavar='MULTISCALE', default='[1]', help="use multiscale vectors for testing, " + " examples: '[1]' | '[1, 1/2**(1/2), 1/2]' | '[1, 2**(1/2), 1/2**(1/2)]' (default: '[1]')") parser.add_argument('--whitening', '-w', metavar='WHITENING', default=None, choices=whitening_names, help="dataset used to learn whitening for testing: " + " | ".join(whitening_names) + " (default: None)") # GPU ID parser.add_argument('--gpu-id', '-g', default='0', metavar='N', help="gpu id used for testing (default: '0')") def main(): args = parser.parse_args() # check if there are unknown datasets for dataset in 
args.datasets.split(','): if dataset not in datasets_names: raise ValueError('Unsupported or unknown dataset: {}!'.format(dataset)) # check if test dataset are downloaded # and download if they are not download_train(get_data_root()) download_test(get_data_root()) # setting up the visible GPU os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id # loading network from path if args.network_path is not None: print(">> Loading network:\n>>>> '{}'".format(args.network_path)) if args.network_path in PRETRAINED: # pretrained modules (downloaded automatically) state = load_url(PRETRAINED[args.network_path], model_dir=os.path.join(get_data_root(), 'modules')) else: # fine-tuned network from path state = torch.load(args.network_path) # parsing net params from meta # architecture, pooling, mean, std required # the rest has default values, in case that is doesnt exist net_params = {} net_params['architecture'] = state['meta']['architecture'] net_params['pooling'] = state['meta']['pooling'] net_params['local_whitening'] = state['meta'].get('local_whitening', False) net_params['regional'] = state['meta'].get('regional', False) net_params['whitening'] = state['meta'].get('whitening', False) net_params['mean'] = state['meta']['mean'] net_params['std'] = state['meta']['std'] net_params['pretrained'] = False # load network net = init_network(net_params) net.load_state_dict(state['state_dict']) # if whitening is precomputed if 'Lw' in state['meta']: net.meta['Lw'] = state['meta']['Lw'] print(">>>> loaded network: ") print(net.meta_repr()) # loading offtheshelf network elif args.network_offtheshelf is not None: # parse off-the-shelf parameters offtheshelf = args.network_offtheshelf.split('-') net_params = {} net_params['architecture'] = offtheshelf[0] net_params['pooling'] = offtheshelf[1] net_params['local_whitening'] = 'lwhiten' in offtheshelf[2:] net_params['regional'] = 'reg' in offtheshelf[2:] net_params['whitening'] = 'whiten' in offtheshelf[2:] net_params['pretrained'] = True # load off-the-shelf network print(">> Loading off-the-shelf network:\n>>>> '{}'".format(args.network_offtheshelf)) net = init_network(net_params) print(">>>> loaded network: ") print(net.meta_repr()) # setting up the multi-scale parameters ms = list(eval(args.multiscale)) if len(ms)>1 and net.meta['pooling'] == 'gem' and not net.meta['regional'] and not net.meta['whitening']: msp = net.pool.p.item() print(">> Set-up multiscale:") print(">>>> ms: {}".format(ms)) print(">>>> msp: {}".format(msp)) else: msp = 1 # moving network to gpu and eval mode net.cuda() net.eval() # set up the transform normalize = transforms.Normalize( mean=net.meta['mean'], std=net.meta['std'] ) transform = transforms.Compose([ transforms.ToTensor(), normalize ]) # compute whitening if args.whitening is not None: start = time.time() if 'Lw' in net.meta and args.whitening in net.meta['Lw']: print('>> {}: Whitening is precomputed, loading it...'.format(args.whitening)) if len(ms)>1: Lw = net.meta['Lw'][args.whitening]['ms'] else: Lw = net.meta['Lw'][args.whitening]['ss'] else: # if we evaluate modules from path we should save/load whitening # not to compute it every time if args.network_path is not None: whiten_fn = args.network_path + '_{}_whiten'.format(args.whitening) if len(ms) > 1: whiten_fn += '_ms' whiten_fn += '.pth' else: whiten_fn = None if whiten_fn is not None and os.path.isfile(whiten_fn): print('>> {}: Whitening is precomputed, loading it...'.format(args.whitening)) Lw = torch.load(whiten_fn) else: print('>> {}: Learning 
whitening...'.format(args.whitening)) # loading db db_root = os.path.join(get_data_root(), 'train', args.whitening) ims_root = os.path.join(db_root, 'ims') db_fn = os.path.join(db_root, '{}-whiten.pkl'.format(args.whitening)) with open(db_fn, 'rb') as f: db = pickle.load(f) images = [cid2filename(db['cids'][i], ims_root) for i in range(len(db['cids']))] # extract whitening vectors print('>> {}: Extracting...'.format(args.whitening)) wvecs = extract_vectors(net, images, args.image_size, transform, ms=ms, msp=msp) # learning whitening print('>> {}: Learning...'.format(args.whitening)) wvecs = wvecs.numpy() m, P = whitenlearn(wvecs, db['qidxs'], db['pidxs']) Lw = {'m': m, 'P': P} # saving whitening if whiten_fn exists if whiten_fn is not None: print('>> {}: Saving to {}...'.format(args.whitening, whiten_fn)) torch.save(Lw, whiten_fn) print('>> {}: elapsed time: {}'.format(args.whitening, htime(time.time()-start))) else: Lw = None # evaluate on test datasets datasets = args.datasets.split(',') for dataset in datasets: start = time.time() print('>> {}: Extracting...'.format(dataset)) # prepare config structure for the test dataset cfg = configdataset(dataset, os.path.join(get_data_root(), 'test')) images = [cfg['im_fname'](cfg,i) for i in range(cfg['n'])] qimages = [cfg['qim_fname'](cfg,i) for i in range(cfg['nq'])] try: bbxs = [tuple(cfg['gnd'][i]['bbx']) for i in range(cfg['nq'])] except: bbxs = None # for holidaysmanrot and copydays # extract database and query vectors print('>> {}: database images...'.format(dataset)) vecs = extract_vectors(net, images, args.image_size, transform, ms=ms, msp=msp) print('>> {}: query images...'.format(dataset)) qvecs = extract_vectors(net, qimages, args.image_size, transform, bbxs=bbxs, ms=ms, msp=msp) print('>> {}: Evaluating...'.format(dataset)) # convert to numpy vecs = vecs.numpy() qvecs = qvecs.numpy() # search, rank, and print scores = np.dot(vecs.T, qvecs) ranks = np.argsort(-scores, axis=0) compute_map_and_print(dataset, ranks, cfg['gnd']) if Lw is not None: # whiten the vectors vecs_lw = whitenapply(vecs, Lw['m'], Lw['P']) qvecs_lw = whitenapply(qvecs, Lw['m'], Lw['P']) # search, rank, and print scores = np.dot(vecs_lw.T, qvecs_lw) ranks = np.argsort(-scores, axis=0) compute_map_and_print(dataset + ' + whiten', ranks, cfg['gnd']) print('>> {}: elapsed time: {}'.format(dataset, htime(time.time()-start))) if __name__ == '__main__': main()
[]
[]
[ "CUDA_VISIBLE_DEVICES" ]
[]
["CUDA_VISIBLE_DEVICES"]
python
1
0
utils/utils.go
package utils import ( "bytes" "crypto/sha256" "encoding/hex" "encoding/json" "errors" "fmt" "index/suffixarray" "io" "io/ioutil" "net/http" "os" "os/exec" "path/filepath" "runtime" "strconv" "strings" "sync" "time" ) // Go is a basic promise implementation: it wraps calls a function in a goroutine, // and returns a channel which will later return the function's return value. func Go(f func() error) chan error { ch := make(chan error) go func() { ch <- f() }() return ch } // Request a given URL and return an io.Reader func Download(url string, stderr io.Writer) (*http.Response, error) { var resp *http.Response var err error = nil if resp, err = http.Get(url); err != nil { return nil, err } if resp.StatusCode >= 400 { return nil, errors.New("Got HTTP status code >= 400: " + resp.Status) } return resp, nil } // Debug function, if the debug flag is set, then display. Do nothing otherwise // If Docker is in damon mode, also send the debug info on the socket func Debugf(format string, a ...interface{}) { if os.Getenv("DEBUG") != "" { // Retrieve the stack infos _, file, line, ok := runtime.Caller(1) if !ok { file = "<unknown>" line = -1 } else { file = file[strings.LastIndex(file, "/")+1:] } fmt.Fprintf(os.Stderr, fmt.Sprintf("[debug] %s:%d %s\n", file, line, format), a...) } } // Reader with progress bar type progressReader struct { reader io.ReadCloser // Stream to read from output io.Writer // Where to send progress bar to readTotal int // Expected stream length (bytes) readProgress int // How much has been read so far (bytes) lastUpdate int // How many bytes read at least update template string // Template to print. Default "%v/%v (%v)" sf *StreamFormatter } func (r *progressReader) Read(p []byte) (n int, err error) { read, err := io.ReadCloser(r.reader).Read(p) r.readProgress += read updateEvery := 4096 if r.readTotal > 0 { // Only update progress for every 1% read if increment := int(0.01 * float64(r.readTotal)); increment > updateEvery { updateEvery = increment } } if r.readProgress-r.lastUpdate > updateEvery || err != nil { if r.readTotal > 0 { fmt.Fprintf(r.output, r.template, r.readProgress, r.readTotal, fmt.Sprintf("%.0f%%", float64(r.readProgress)/float64(r.readTotal)*100)) } else { fmt.Fprintf(r.output, r.template, r.readProgress, "?", "n/a") } r.lastUpdate = r.readProgress } // Send newline when complete if err != nil { r.output.Write(r.sf.FormatStatus("")) } return read, err } func (r *progressReader) Close() error { return io.ReadCloser(r.reader).Close() } func ProgressReader(r io.ReadCloser, size int, output io.Writer, template []byte, sf *StreamFormatter) *progressReader { tpl := string(template) if tpl == "" { tpl = string(sf.FormatProgress("", "%v/%v (%v)")) } return &progressReader{r, NewWriteFlusher(output), size, 0, 0, tpl, sf} } // HumanDuration returns a human-readable approximation of a duration // (eg. "About a minute", "4 hours ago", etc.) 
func HumanDuration(d time.Duration) string { if seconds := int(d.Seconds()); seconds < 1 { return "Less than a second" } else if seconds < 60 { return fmt.Sprintf("%d seconds", seconds) } else if minutes := int(d.Minutes()); minutes == 1 { return "About a minute" } else if minutes < 60 { return fmt.Sprintf("%d minutes", minutes) } else if hours := int(d.Hours()); hours == 1 { return "About an hour" } else if hours < 48 { return fmt.Sprintf("%d hours", hours) } else if hours < 24*7*2 { return fmt.Sprintf("%d days", hours/24) } else if hours < 24*30*3 { return fmt.Sprintf("%d weeks", hours/24/7) } else if hours < 24*365*2 { return fmt.Sprintf("%d months", hours/24/30) } return fmt.Sprintf("%d years", d.Hours()/24/365) } func Trunc(s string, maxlen int) string { if len(s) <= maxlen { return s } return s[:maxlen] } // Figure out the absolute path of our own binary func SelfPath() string { path, err := exec.LookPath(os.Args[0]) if err != nil { panic(err) } path, err = filepath.Abs(path) if err != nil { panic(err) } return path } type NopWriter struct { } func (w *NopWriter) Write(buf []byte) (int, error) { return len(buf), nil } type nopWriteCloser struct { io.Writer } func (w *nopWriteCloser) Close() error { return nil } func NopWriteCloser(w io.Writer) io.WriteCloser { return &nopWriteCloser{w} } type bufReader struct { buf *bytes.Buffer reader io.Reader err error l sync.Mutex wait sync.Cond } func NewBufReader(r io.Reader) *bufReader { reader := &bufReader{ buf: &bytes.Buffer{}, reader: r, } reader.wait.L = &reader.l go reader.drain() return reader } func (r *bufReader) drain() { buf := make([]byte, 1024) for { n, err := r.reader.Read(buf) r.l.Lock() if err != nil { r.err = err } else { r.buf.Write(buf[0:n]) } r.wait.Signal() r.l.Unlock() if err != nil { break } } } func (r *bufReader) Read(p []byte) (n int, err error) { r.l.Lock() defer r.l.Unlock() for { n, err = r.buf.Read(p) if n > 0 { return n, err } if r.err != nil { return 0, r.err } r.wait.Wait() } panic("unreachable") } func (r *bufReader) Close() error { closer, ok := r.reader.(io.ReadCloser) if !ok { return nil } return closer.Close() } type WriteBroadcaster struct { mu sync.Mutex writers map[io.WriteCloser]struct{} } func (w *WriteBroadcaster) AddWriter(writer io.WriteCloser) { w.mu.Lock() w.writers[writer] = struct{}{} w.mu.Unlock() } // FIXME: Is that function used? // FIXME: This relies on the concrete writer type used having equality operator func (w *WriteBroadcaster) RemoveWriter(writer io.WriteCloser) { w.mu.Lock() delete(w.writers, writer) w.mu.Unlock() } func (w *WriteBroadcaster) Write(p []byte) (n int, err error) { w.mu.Lock() defer w.mu.Unlock() for writer := range w.writers { if n, err := writer.Write(p); err != nil || n != len(p) { // On error, evict the writer delete(w.writers, writer) } } return len(p), nil } func (w *WriteBroadcaster) CloseWriters() error { w.mu.Lock() defer w.mu.Unlock() for writer := range w.writers { writer.Close() } w.writers = make(map[io.WriteCloser]struct{}) return nil } func NewWriteBroadcaster() *WriteBroadcaster { return &WriteBroadcaster{writers: make(map[io.WriteCloser]struct{})} } func GetTotalUsedFds() int { if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { Debugf("Error opening /proc/%d/fd: %s", os.Getpid(), err) } else { return len(fds) } return -1 } // TruncIndex allows the retrieval of string identifiers by any of their unique prefixes. // This is used to retrieve image and container IDs by more convenient shorthand prefixes. 
type TruncIndex struct { index *suffixarray.Index ids map[string]bool bytes []byte } func NewTruncIndex() *TruncIndex { return &TruncIndex{ index: suffixarray.New([]byte{' '}), ids: make(map[string]bool), bytes: []byte{' '}, } } func (idx *TruncIndex) Add(id string) error { if strings.Contains(id, " ") { return fmt.Errorf("Illegal character: ' '") } if _, exists := idx.ids[id]; exists { return fmt.Errorf("Id already exists: %s", id) } idx.ids[id] = true idx.bytes = append(idx.bytes, []byte(id+" ")...) idx.index = suffixarray.New(idx.bytes) return nil } func (idx *TruncIndex) Delete(id string) error { if _, exists := idx.ids[id]; !exists { return fmt.Errorf("No such id: %s", id) } before, after, err := idx.lookup(id) if err != nil { return err } delete(idx.ids, id) idx.bytes = append(idx.bytes[:before], idx.bytes[after:]...) idx.index = suffixarray.New(idx.bytes) return nil } func (idx *TruncIndex) lookup(s string) (int, int, error) { offsets := idx.index.Lookup([]byte(" "+s), -1) //log.Printf("lookup(%s): %v (index bytes: '%s')\n", s, offsets, idx.index.Bytes()) if offsets == nil || len(offsets) == 0 || len(offsets) > 1 { return -1, -1, fmt.Errorf("No such id: %s", s) } offsetBefore := offsets[0] + 1 offsetAfter := offsetBefore + strings.Index(string(idx.bytes[offsetBefore:]), " ") return offsetBefore, offsetAfter, nil } func (idx *TruncIndex) Get(s string) (string, error) { before, after, err := idx.lookup(s) //log.Printf("Get(%s) bytes=|%s| before=|%d| after=|%d|\n", s, idx.bytes, before, after) if err != nil { return "", err } return string(idx.bytes[before:after]), err } // TruncateId returns a shorthand version of a string identifier for convenience. // A collision with other shorthands is very unlikely, but possible. // In case of a collision a lookup with TruncIndex.Get() will fail, and the caller // will need to use a langer prefix, or the full-length Id. func TruncateId(id string) string { shortLen := 12 if len(id) < shortLen { shortLen = len(id) } return id[:shortLen] } // Code c/c from io.Copy() modified to handle escape sequence func CopyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error) { buf := make([]byte, 32*1024) for { nr, er := src.Read(buf) if nr > 0 { // ---- Docker addition // char 16 is C-p if nr == 1 && buf[0] == 16 { nr, er = src.Read(buf) // char 17 is C-q if nr == 1 && buf[0] == 17 { if err := src.Close(); err != nil { return 0, err } return 0, io.EOF } } // ---- End of docker nw, ew := dst.Write(buf[0:nr]) if nw > 0 { written += int64(nw) } if ew != nil { err = ew break } if nr != nw { err = io.ErrShortWrite break } } if er == io.EOF { break } if er != nil { err = er break } } return written, err } func HashData(src io.Reader) (string, error) { h := sha256.New() if _, err := io.Copy(h, src); err != nil { return "", err } return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil } type KernelVersionInfo struct { Kernel int Major int Minor int Flavor string } func (k *KernelVersionInfo) String() string { flavor := "" if len(k.Flavor) > 0 { flavor = fmt.Sprintf("-%s", k.Flavor) } return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, flavor) } // Compare two KernelVersionInfo struct. 
// Returns -1 if a < b, = if a == b, 1 it a > b func CompareKernelVersion(a, b *KernelVersionInfo) int { if a.Kernel < b.Kernel { return -1 } else if a.Kernel > b.Kernel { return 1 } if a.Major < b.Major { return -1 } else if a.Major > b.Major { return 1 } if a.Minor < b.Minor { return -1 } else if a.Minor > b.Minor { return 1 } return 0 } func FindCgroupMountpoint(cgroupType string) (string, error) { output, err := ioutil.ReadFile("/proc/mounts") if err != nil { return "", err } // /proc/mounts has 6 fields per line, one mount per line, e.g. // cgroup /sys/fs/cgroup/devices cgroup rw,relatime,devices 0 0 for _, line := range strings.Split(string(output), "\n") { parts := strings.Split(line, " ") if len(parts) == 6 && parts[2] == "cgroup" { for _, opt := range strings.Split(parts[3], ",") { if opt == cgroupType { return parts[1], nil } } } } return "", fmt.Errorf("cgroup mountpoint not found for %s", cgroupType) } func GetKernelVersion() (*KernelVersionInfo, error) { var ( flavor string kernel, major, minor int err error ) uts, err := uname() if err != nil { return nil, err } release := make([]byte, len(uts.Release)) i := 0 for _, c := range uts.Release { release[i] = byte(c) i++ } // Remove the \x00 from the release for Atoi to parse correctly release = release[:bytes.IndexByte(release, 0)] tmp := strings.SplitN(string(release), "-", 2) tmp2 := strings.SplitN(tmp[0], ".", 3) if len(tmp2) > 0 { kernel, err = strconv.Atoi(tmp2[0]) if err != nil { return nil, err } } if len(tmp2) > 1 { major, err = strconv.Atoi(tmp2[1]) if err != nil { return nil, err } } if len(tmp2) > 2 { minor, err = strconv.Atoi(tmp2[2]) if err != nil { return nil, err } } if len(tmp) == 2 { flavor = tmp[1] } else { flavor = "" } return &KernelVersionInfo{ Kernel: kernel, Major: major, Minor: minor, Flavor: flavor, }, nil } func CopyDirectory(source, dest string) error { if output, err := exec.Command("cp", "-ra", source, dest).CombinedOutput(); err != nil { return fmt.Errorf("Error copy: %s (%s)", err, output) } return nil } type NopFlusher struct{} func (f *NopFlusher) Flush() {} type WriteFlusher struct { w io.Writer flusher http.Flusher } func (wf *WriteFlusher) Write(b []byte) (n int, err error) { n, err = wf.w.Write(b) wf.flusher.Flush() return n, err } func NewWriteFlusher(w io.Writer) *WriteFlusher { var flusher http.Flusher if f, ok := w.(http.Flusher); ok { flusher = f } else { flusher = &NopFlusher{} } return &WriteFlusher{w: w, flusher: flusher} } type JsonMessage struct { Status string `json:"status,omitempty"` Progress string `json:"progress,omitempty"` Error string `json:"error,omitempty"` } type StreamFormatter struct { json bool used bool } func NewStreamFormatter(json bool) *StreamFormatter { return &StreamFormatter{json, false} } func (sf *StreamFormatter) FormatStatus(format string, a ...interface{}) []byte { sf.used = true str := fmt.Sprintf(format, a...) 
if sf.json { b, err := json.Marshal(&JsonMessage{Status:str}); if err != nil { return sf.FormatError(err) } return b } return []byte(str + "\r\n") } func (sf *StreamFormatter) FormatError(err error) []byte { sf.used = true if sf.json { if b, err := json.Marshal(&JsonMessage{Error:err.Error()}); err == nil { return b } return []byte("{\"error\":\"format error\"}") } return []byte("Error: " + err.Error() + "\r\n") } func (sf *StreamFormatter) FormatProgress(action, str string) []byte { sf.used = true if sf.json { b, err := json.Marshal(&JsonMessage{Progress:str}) if err != nil { return nil } return b } return []byte(action + " " + str + "\r") } func (sf *StreamFormatter) Used() bool { return sf.used }
[ "\"DEBUG\"" ]
[]
[ "DEBUG" ]
[]
["DEBUG"]
go
1
0
gomplate_test.go
package gomplate import ( "bytes" "context" "net/http/httptest" "os" "path/filepath" "testing" "github.com/spf13/afero" "text/template" "github.com/hairyhenderson/gomplate/v3/aws" "github.com/hairyhenderson/gomplate/v3/conv" "github.com/hairyhenderson/gomplate/v3/data" "github.com/hairyhenderson/gomplate/v3/env" "github.com/stretchr/testify/assert" ) func testTemplate(g *gomplate, tmpl string) string { var out bytes.Buffer err := g.runTemplate(context.TODO(), &tplate{name: "testtemplate", contents: tmpl, target: &out}) if err != nil { panic(err) } return out.String() } func TestGetenvTemplates(t *testing.T) { g := &gomplate{ funcMap: template.FuncMap{ "getenv": env.Getenv, "bool": conv.Bool, }, } assert.Empty(t, testTemplate(g, `{{getenv "BLAHBLAHBLAH"}}`)) assert.Equal(t, os.Getenv("USER"), testTemplate(g, `{{getenv "USER"}}`)) assert.Equal(t, "default value", testTemplate(g, `{{getenv "BLAHBLAHBLAH" "default value"}}`)) } func TestBoolTemplates(t *testing.T) { g := &gomplate{ funcMap: template.FuncMap{ "bool": conv.Bool, }, } assert.Equal(t, "true", testTemplate(g, `{{bool "true"}}`)) assert.Equal(t, "false", testTemplate(g, `{{bool "false"}}`)) assert.Equal(t, "false", testTemplate(g, `{{bool "foo"}}`)) assert.Equal(t, "false", testTemplate(g, `{{bool ""}}`)) } func TestEc2MetaTemplates(t *testing.T) { createGomplate := func(status int, body string) (*gomplate, *httptest.Server) { server, ec2meta := aws.MockServer(status, body) return &gomplate{funcMap: template.FuncMap{"ec2meta": ec2meta.Meta}}, server } g, s := createGomplate(404, "") defer s.Close() assert.Equal(t, "", testTemplate(g, `{{ec2meta "foo"}}`)) assert.Equal(t, "default", testTemplate(g, `{{ec2meta "foo" "default"}}`)) s.Close() g, s = createGomplate(200, "i-1234") defer s.Close() assert.Equal(t, "i-1234", testTemplate(g, `{{ec2meta "instance-id"}}`)) assert.Equal(t, "i-1234", testTemplate(g, `{{ec2meta "instance-id" "default"}}`)) } func TestEc2MetaTemplates_WithJSON(t *testing.T) { server, ec2meta := aws.MockServer(200, `{"foo":"bar"}`) defer server.Close() g := &gomplate{ funcMap: template.FuncMap{ "ec2meta": ec2meta.Meta, "ec2dynamic": ec2meta.Dynamic, "json": data.JSON, }, } assert.Equal(t, "bar", testTemplate(g, `{{ (ec2meta "obj" | json).foo }}`)) assert.Equal(t, "bar", testTemplate(g, `{{ (ec2dynamic "obj" | json).foo }}`)) } func TestJSONArrayTemplates(t *testing.T) { g := &gomplate{ funcMap: template.FuncMap{ "jsonArray": data.JSONArray, }, } assert.Equal(t, "[foo bar]", testTemplate(g, `{{jsonArray "[\"foo\",\"bar\"]"}}`)) assert.Equal(t, "bar", testTemplate(g, `{{ index (jsonArray "[\"foo\",\"bar\"]") 1 }}`)) } func TestYAMLTemplates(t *testing.T) { g := &gomplate{ funcMap: template.FuncMap{ "yaml": data.YAML, "yamlArray": data.YAMLArray, }, } assert.Equal(t, "bar", testTemplate(g, `{{(yaml "foo: bar").foo}}`)) assert.Equal(t, "[foo bar]", testTemplate(g, `{{yamlArray "- foo\n- bar\n"}}`)) assert.Equal(t, "bar", testTemplate(g, `{{ index (yamlArray "[\"foo\",\"bar\"]") 1 }}`)) } func TestSliceTemplates(t *testing.T) { g := &gomplate{ funcMap: template.FuncMap{ "slice": conv.Slice, }, } assert.Equal(t, "foo", testTemplate(g, `{{index (slice "foo") 0}}`)) assert.Equal(t, `[foo bar 42]`, testTemplate(g, `{{slice "foo" "bar" 42}}`)) assert.Equal(t, `helloworld`, testTemplate(g, `{{range slice "hello" "world"}}{{.}}{{end}}`)) } func TestHasTemplate(t *testing.T) { g := &gomplate{ funcMap: template.FuncMap{ "yaml": data.YAML, "has": conv.Has, }, } assert.Equal(t, "true", testTemplate(g, `{{has ("foo:\n bar: true" | 
yaml) "foo"}}`)) assert.Equal(t, "true", testTemplate(g, `{{has ("foo:\n bar: true" | yaml).foo "bar"}}`)) assert.Equal(t, "false", testTemplate(g, `{{has ("foo: true" | yaml) "bah"}}`)) tmpl := `{{- $data := yaml "foo: bar\nbaz: qux\n" }} {{- if (has $data "baz") }} {{- $data.baz }} {{- end }}` assert.Equal(t, "qux", testTemplate(g, tmpl)) tmpl = `{{- $data := yaml "foo: bar\nbaz: qux\n" }} {{- if (has $data "quux") }} {{- $data.quux }} {{- else }} {{- $data.foo }} {{- end }}` assert.Equal(t, "bar", testTemplate(g, tmpl)) } func TestCustomDelim(t *testing.T) { g := &gomplate{ leftDelim: "[", rightDelim: "]", funcMap: template.FuncMap{}, } assert.Equal(t, "hi", testTemplate(g, `[print "hi"]`)) } func TestRunTemplates(t *testing.T) { defer func() { Stdout = os.Stdout }() buf := &bytes.Buffer{} Stdout = &nopWCloser{buf} config := &Config{Input: "foo", OutputFiles: []string{"-"}} err := RunTemplates(config) assert.NoError(t, err) assert.Equal(t, "foo", buf.String()) assert.Equal(t, 1, Metrics.TemplatesGathered) assert.Equal(t, 1, Metrics.TemplatesProcessed) assert.Equal(t, 0, Metrics.Errors) } func TestParseTemplateArg(t *testing.T) { fs = afero.NewMemMapFs() afero.WriteFile(fs, "foo.t", []byte("hi"), 0600) _ = fs.MkdirAll("dir", 0755) afero.WriteFile(fs, "dir/foo.t", []byte("hi"), 0600) afero.WriteFile(fs, "dir/bar.t", []byte("hi"), 0600) testdata := []struct { arg string expected map[string]string err bool }{ {"bogus.t", nil, true}, {"foo.t", map[string]string{"foo.t": "foo.t"}, false}, {"foo=foo.t", map[string]string{"foo": "foo.t"}, false}, {"dir/foo.t", map[string]string{"dir/foo.t": "dir/foo.t"}, false}, {"foo=dir/foo.t", map[string]string{"foo": "dir/foo.t"}, false}, {"dir/", map[string]string{"dir/foo.t": "dir/foo.t", "dir/bar.t": "dir/bar.t"}, false}, {"t=dir/", map[string]string{"t/foo.t": "dir/foo.t", "t/bar.t": "dir/bar.t"}, false}, } for _, d := range testdata { nested := templateAliases{} err := parseTemplateArg(d.arg, nested) if d.err { assert.Error(t, err, d.arg) } else { assert.NoError(t, err, d.arg) assert.Equal(t, templateAliases(d.expected), nested, d.arg) } } } func TestParseTemplateArgs(t *testing.T) { fs = afero.NewMemMapFs() afero.WriteFile(fs, "foo.t", []byte("hi"), 0600) _ = fs.MkdirAll("dir", 0755) afero.WriteFile(fs, "dir/foo.t", []byte("hi"), 0600) afero.WriteFile(fs, "dir/bar.t", []byte("hi"), 0600) args := []string{"foo.t", "foo=foo.t", "bar=dir/foo.t", "dir/", "t=dir/", } expected := map[string]string{ "foo.t": "foo.t", "foo": "foo.t", "bar": "dir/foo.t", "dir/foo.t": "dir/foo.t", "dir/bar.t": "dir/bar.t", "t/foo.t": "dir/foo.t", "t/bar.t": "dir/bar.t", } nested, err := parseTemplateArgs(args) assert.NoError(t, err) assert.Equal(t, templateAliases(expected), nested) _, err = parseTemplateArgs([]string{"bogus.t"}) assert.Error(t, err) } func TestSimpleNamer(t *testing.T) { n := simpleNamer("out/") out, err := n("file") assert.NoError(t, err) expected := filepath.FromSlash("out/file") assert.Equal(t, expected, out) } func TestMappingNamer(t *testing.T) { g := &gomplate{funcMap: map[string]interface{}{ "foo": func() string { return "foo" }, }} n := mappingNamer("out/{{ .in }}", g) out, err := n("file") assert.NoError(t, err) expected := filepath.FromSlash("out/file") assert.Equal(t, expected, out) n = mappingNamer("out/{{ foo }}{{ .in }}", g) out, err = n("file") assert.NoError(t, err) expected = filepath.FromSlash("out/foofile") assert.Equal(t, expected, out) }
[ "\"USER\"" ]
[]
[ "USER" ]
[]
["USER"]
go
1
0
mlapi/app.py
import json # import sqlite3 import os import logging import coloredlogs coloredlogs.install(level='DEBUG', format='%(levelname)s%(asctime)s:%(message)s',) from flask import request, url_for, Response, jsonify, g, redirect from flask_api import FlaskAPI, status, exceptions from flask_api.decorators import set_parsers from flask_cors import CORS from flask_jwt import JWT, jwt_required, current_identity from werkzeug.security import safe_str_cmp # from mlapi.modelRouter import ModelRouterClass from mlapi.api_users_methods import create_user, delete_user, update_user from mlapi.helpers import err_tmplt from flask_api.parsers import JSONParser, URLEncodedParser, MultiPartParser from mlapi.parsers.imageParser import JpegImageParser, PngImageParser # from flask_httpauth import HTTPBasicAuth from flask_bcrypt import Bcrypt from .images import ImageStorage from models.modelsHolder import ModelsHolderClass from flask_sqlalchemy import SQLAlchemy from .shellColors import ShellColors bcrypt = "" database = "" app = "" jwt = "" MODELS_DIR = "./models/computed/" white_list = [ 'http://localhost:3000', 'http://localhost:8000', 'https://api.mlapi.io', 'https://demo.mlapi.io'] if os.popen('stty size', 'r').read(): rows, columns = os.popen('stty size', 'r').read().split() columns = int(columns) print('\n' + columns * "=") print(((columns-len(__name__))//2) * "=" + ShellColors.HEADER + __name__ + ShellColors.ENDC + ((columns -1 - len(__name__))//2 + 1) * "=") print(columns * "=" + '\n') else: print("======MLAPI.IO=======") def reduce_uses(fn): '''A decorator function that invokes the DB uses-reducing function. ''' def W1(*args, **kwargs): errors = [] if current_identity and request: if dbc.get_users_available_uses(current_identity['user_id']) == 0: errors.append("No token uses left for this user") else: dbc.reduce_uses(current_identity['user_id']) return fn(errors=errors, *args,**kwargs) W1.__name__ = fn.__name__ return W1 def save_request(response, data_type=None, data=None): '''Function invoking request saving in database ''' try: dbc.save_request( request_type=request.method, request_url=request.url, response = response, user_id = current_identity['user_id'] if current_identity else 0, is_xhr = request.is_xhr, headers = str(request.headers), data_type = data_type, data = data) except: logging.warning("There was an error while saving request data to DB. {}".format(request)) def errors_handler(errors=None): '''Wrapper for logging errors ''' if errors: for e in errors: logging.error(e) return errors def create_app(image_storage): '''Function returning Flask app instance ''' global bcrypt, database, jwt, app, dbc app = FlaskAPI(__name__) CORS(app, origins=white_list) models_holder_instance = ModelsHolderClass(MODELS_DIR) app_settings = os.getenv('APP_SETTINGS','db.config.DevelopmentConfig') app.config.from_object(app_settings) logging.info("Using {} settings.".format(app_settings)) bcrypt = Bcrypt(app) try: database = SQLAlchemy(app) except Exception: logging.critical("Couldn't connect to DB!") from db.dbConnection import DbConnectionClass dbc = DbConnectionClass(database, app.config) jwt = JWT(app, dbc.authenticate, dbc.identity) @app.route('/') def slash(): return redirect(url_for('root')) @app.route('/v2/', methods=['GET']) def root(): '''Function responding to request to "/v2/" route. Returns a list of possible routes. 
''' text = { 'available_routes': [ "api.mlapi.io/v2/token - Check current token balance status [POST]", "api.mlapi.io/v2/test1 - Car recognition NN[GET, POST]", ] } return text @app.route('/v2/token', methods=['GET']) @jwt_required() def token(): '''Retrieve description of route or amount of uses left on token ''' return { "uses_left": str(dbc.get_users_available_uses(current_identity['user_id'])) } @app.route('/v2/user', methods=['POST', 'PATCH', 'DELETE']) @set_parsers(JSONParser) @jwt_required() def user(): '''Manage users. ''' ################################# #### METHODS FOR ADMIN USERS ONLY if request.method == 'POST': req = request.data logging.debug(req) uses = req['uses'] if 'uses' in req else 100 isa = req['is_admin'] if "is_admin" in req else False if 'email' not in req: return { 'error' : "No email given" }, status.HTTP_400_BAD_REQUEST return create_user(dbc, req['email'], uses, isa) elif request.method == 'DELETE': id = request.user_id return delete_user(id) #### METHODS FOR ADMIN USERS ONLY ################################# elif request.method == 'GET': pass elif request.method == 'PUT': pass ## Needed for unathorized pre-requests @app.route('/v2/test1', methods=['OPTIONS']) def test1_opt(): return "" @app.route('/v2/test1', methods=['GET', 'POST']) @jwt_required() @set_parsers(JSONParser, JpegImageParser, PngImageParser, MultiPartParser) @reduce_uses def test1(errors=None): '''Responds with predictions on the sent image on POST. Returns description on GET request. Accepted formats: image/jpeg, image/png, application/json with an image in Base64 format. ''' if errors: return {"result":errors_handler(errors)} model_name = 'test1' logging.debug("REQUEST: {}".format(repr(request))) if request.method == 'GET': return { "description" : "Make an authenticated POST request for predicting the image. POST binary file with proper header or { 'image' : 'BASE64 image' }", "accepted_content_type" : [ "image/jpeg", "image/png", "application/json" ] } elif request.method == 'POST': # logging.debug("Got files from client: >>> {} <<<".format(request.files)) if request.files: val = request.files['file'] path = image_storage.save(val, request.headers['Content-Type']) response = { "result" : models_holder_instance.sendRequest(model_name, path) } save_request( response = str(response['result']), data_type = "I", data = path) return response elif request.data: path = image_storage.save(request.data, request.headers['Content-Type']) response = { "result" : models_holder_instance.sendRequest(model_name, path) } save_request( response = str(response['result']), data_type = "I", data = path) return response else: return { "result":"You provided no data" } return app, bcrypt, database, image_storage, jwt def get_app(): image_storage = ImageStorage(storage_path = "./images/") return create_app(image_storage) if __name__ == "mlapi.app": app, bcrypt, database, image_storage, jwt = application, bcrypt, database, image_storage, jwt = get_app()
[]
[]
[ "APP_SETTINGS" ]
[]
["APP_SETTINGS"]
python
1
0
core/src/main/java/org/zstack/core/Platform.java
package org.zstack.core; import org.apache.commons.io.FileUtils; import org.apache.commons.lang.LocaleUtils; import org.apache.commons.lang.RandomStringUtils; import org.apache.commons.lang.StringUtils; import org.reflections.Reflections; import org.springframework.beans.factory.BeanFactory; import org.springframework.context.MessageSource; import org.springframework.context.NoSuchMessageException; import org.springframework.web.context.WebApplicationContext; import org.zstack.core.cloudbus.CloudBus; import org.zstack.core.componentloader.ComponentLoader; import org.zstack.core.componentloader.ComponentLoaderImpl; import org.zstack.core.config.GlobalConfigFacade; import org.zstack.core.db.DatabaseFacade; import org.zstack.core.db.DatabaseGlobalProperty; import org.zstack.core.db.Q; import org.zstack.core.encrypt.EncryptRSA; import org.zstack.core.errorcode.ErrorFacade; import org.zstack.core.statemachine.StateMachine; import org.zstack.core.statemachine.StateMachineImpl; import org.zstack.header.Component; import org.zstack.header.core.StaticInit; import org.zstack.header.core.encrypt.ENCRYPT; import org.zstack.header.errorcode.*; import org.zstack.header.exception.CloudRuntimeException; import org.zstack.header.vo.BaseResource; import org.zstack.utils.*; import org.zstack.utils.data.StringTemplate; import org.zstack.utils.logging.CLogger; import org.zstack.utils.logging.CLoggerImpl; import org.zstack.utils.network.NetworkUtils; import org.zstack.utils.path.PathUtil; import org.zstack.utils.string.ErrorCodeElaboration; import org.zstack.utils.string.StringSimilarity; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.lang.management.ManagementFactory; import java.lang.management.RuntimeMXBean; import java.lang.reflect.Field; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.lang.reflect.Modifier; import java.net.InetAddress; import java.net.NetworkInterface; import java.net.SocketException; import java.sql.Timestamp; import java.util.*; import java.util.concurrent.TimeUnit; import java.util.function.Function; import java.util.function.Supplier; import java.util.stream.Collectors; import static org.zstack.utils.CollectionDSL.e; import static org.zstack.utils.CollectionDSL.map; import static org.zstack.utils.StringDSL.ln; public class Platform { private static final CLogger logger = CLoggerImpl.getLogger(Platform.class); private static ComponentLoader loader; private static String msId; private static String managementServerIp; private static String managementServerCidr; private static MessageSource messageSource; private static String encryptionKey = EncryptRSA.generateKeyString("ZStack open source"); private static EncryptRSA rsa = new EncryptRSA(); private static Map<String, Double> errorCounter = new HashMap<>(); public static final String COMPONENT_CLASSPATH_HOME = "componentsHome"; public static final String FAKE_UUID = "THIS_IS_A_FAKE_UUID"; private static final Map<String, String> globalProperties = new HashMap<String, String>(); private static Locale locale; public static volatile boolean IS_RUNNING = true; private static Reflections reflections = BeanUtils.reflections; public static Reflections getReflections() { return reflections; } public static Set<Method> encryptedMethodsMap; public static Map<String, String> childResourceToBaseResourceMap = new HashMap<>(); static Map<Class, DynamicObjectMetadata> dynamicObjectMetadata = new HashMap<>(); public static Locale getLocale() { return locale; } 
private static Map<String, String> linkGlobalPropertyMap(String prefix) { Map<String, String> ret = new HashMap<String, String>(); Map<String, String> map = getGlobalPropertiesStartWith(prefix); if (map.isEmpty()) { return ret; } for (Map.Entry<String, String> e : map.entrySet()) { String key = StringDSL.stripStart(e.getKey(), prefix).trim(); ret.put(key, e.getValue().trim()); } return ret; } private static void linkGlobalProperty(Class clz, Map<String, String> propertiesMap) { for (Field f : clz.getDeclaredFields()) { GlobalProperty at = f.getAnnotation(GlobalProperty.class); if (at == null) { continue; } if (!Modifier.isStatic(f.getModifiers())) { throw new CloudRuntimeException(String.format("%s.%s is annotated by @GlobalProperty but it's not defined with static modifier", clz.getName(), f.getName())); } Object valueToSet = null; String name = at.name(); if (Map.class.isAssignableFrom(f.getType())) { Map<String, String> ret = linkGlobalPropertyMap(name); if (ret.isEmpty() && at.required()) { throw new IllegalArgumentException(String.format("A required global property[%s] missing in zstack.properties", name)); } if (at.encrypted()) { ret.forEach((k, v) -> ret.put(k, rsa.decrypt(v, encryptionKey))); } valueToSet = ret; } else if (List.class.isAssignableFrom(f.getType())) { List<String> ret = linkGlobalPropertyList(name); if (ret.isEmpty() && at.required()) { throw new IllegalArgumentException(String.format("A required global property[%s] missing in zstack.properties", name)); } if (at.encrypted()) { ret = ret.stream().map(it -> rsa.decrypt(it, encryptionKey)).collect(Collectors.toList()); } valueToSet = ret; } else { String value = propertiesMap.get(name); if (value == null && at.defaultValue().equals(GlobalProperty.DEFAULT_NULL_STRING) && at.required()) { throw new IllegalArgumentException(String.format("A required global property[%s] missing in zstack.properties", name)); } if (value == null) { value = at.defaultValue(); } if (GlobalProperty.DEFAULT_NULL_STRING.equals(value)) { value = null; } if (value != null) { if (at.encrypted()) { value = rsa.decrypt(value, encryptionKey); } value = StringTemplate.substitute(value, propertiesMap); } if (Integer.class.isAssignableFrom(f.getType()) || Integer.TYPE.isAssignableFrom(f.getType())) { valueToSet = TypeUtils.stringToValue(value, Integer.class, 0); } else if (Long.class.isAssignableFrom(f.getType()) || Long.TYPE.isAssignableFrom(f.getType())) { valueToSet = TypeUtils.stringToValue(value, Long.class, 0L); } else if (Float.class.isAssignableFrom(f.getType()) || Float.TYPE.isAssignableFrom(f.getType())) { valueToSet = TypeUtils.stringToValue(value, Float.class, 0F); } else if (Double.class.isAssignableFrom(f.getType()) || Double.TYPE.isAssignableFrom(f.getType())) { valueToSet = TypeUtils.stringToValue(value, Double.class, 0D); } else if (String.class.isAssignableFrom(f.getType())) { valueToSet = value; } else if (Boolean.class.isAssignableFrom(f.getType()) || Boolean.TYPE.isAssignableFrom(f.getType())) { valueToSet = TypeUtils.stringToValue(value, Boolean.class); } else { throw new CloudRuntimeException(String.format("%s.%s of type[%s] is unsupported by global property. try use Platform.getGlobalProperty() and parse by yourself", clz.getName(), f.getName(), f.getType().getName())); } } f.setAccessible(true); try { f.set(null, valueToSet); globalProperties.put(name, valueToSet == null ? 
"null" : valueToSet.toString()); if (logger.isTraceEnabled()) { logger.trace(String.format("linked global property[%s.%s], value: %s", clz.getName(), f.getName(), valueToSet)); } } catch (IllegalAccessException e) { throw new CloudRuntimeException(String.format("unable to link global property[%s.%s]", clz.getName(), f.getName()), e); } } } public static Map<String, String> getGlobalProperties() { return globalProperties; } private static List<String> linkGlobalPropertyList(String name) { Map<String, String> map = getGlobalPropertiesStartWith(name); List<String> ret = new ArrayList<String>(map.size()); if (map.isEmpty()) { return ret; } List<String> orderedKeys = new ArrayList<String>(); orderedKeys.addAll(map.keySet()); Collections.sort(orderedKeys); for (String key : orderedKeys) { String index = StringDSL.stripStart(key, name).trim(); try { Long.parseLong(index); } catch (NumberFormatException e) { throw new IllegalArgumentException(String.format("[Illegal List Definition] %s is an invalid list key" + " definition, the last character must be a number, for example %s1. %s is not a number", key, key, index)); } ret.add(map.get(key)); } return ret; } private static void linkGlobalProperty() { Set<Class<?>> clzs = reflections.getTypesAnnotatedWith(GlobalPropertyDefinition.class); boolean noTrim = System.getProperty("DoNotTrimPropertyFile") != null; List<String> lst = new ArrayList<String>(); Map<String, String> propertiesMap = new HashMap<String, String>(); for (final String name: System.getProperties().stringPropertyNames()) { String value = System.getProperty(name); if (!noTrim) { value = value.trim(); } propertiesMap.put(name, value); lst.add(String.format("%s=%s", name, value)); } logger.debug(String.format("system properties:\n%s", StringUtils.join(lst, ","))); for (Class clz : clzs) { linkGlobalProperty(clz, propertiesMap); } } public static String getManagementPid() { if (CoreGlobalProperty.UNIT_TEST_ON) { return ""; } return ManagementFactory.getRuntimeMXBean().getName().split("@")[0]; } private static void writePidFile() throws IOException { if (CoreGlobalProperty.UNIT_TEST_ON) { return; } File pidFile = new File(CoreGlobalProperty.PID_FILE_PATH); if (pidFile.exists()) { String pidStr = FileUtils.readFileToString(pidFile); try { long pid = Long.parseLong(pidStr); String processProcDir = String.format("/proc/%s", pid); File processProcDirFile = new File(processProcDir); if (processProcDirFile.exists()) { throw new CloudRuntimeException(String.format("pid file[%s] exists and the process[pid:%s] that the pid file points to is still running", CoreGlobalProperty.PID_FILE_PATH, pidStr)); } } catch (NumberFormatException e) { logger.warn(String.format("pid file[%s] includes an invalid pid[%s] that is not a long number, ignore it", CoreGlobalProperty.PID_FILE_PATH, pidStr)); } logger.info(String.format("stale pid file[%s], ignore it", CoreGlobalProperty.PID_FILE_PATH)); } pidFile.deleteOnExit(); String pid = ManagementFactory.getRuntimeMXBean().getName().split("@")[0]; FileUtils.writeStringToFile(pidFile, pid); } private static void prepareDefaultDbProperties() { if (DatabaseGlobalProperty.DbUrl != null) { String dbUrl = DatabaseGlobalProperty.DbUrl; if (dbUrl.endsWith("/")) { dbUrl = dbUrl.substring(0, dbUrl.length()-1); } if (getGlobalProperty("DbFacadeDataSource.jdbcUrl") == null) { String url; if (dbUrl.contains("{database}")) { url = ln(dbUrl).formatByMap( map(e("database", "zstack")) ); } else { url = String.format("%s/zstack", dbUrl); } 
System.setProperty("DbFacadeDataSource.jdbcUrl", url); logger.debug(String.format("default DbFacadeDataSource.jdbcUrl to DB.url [%s]", url)); } if (getGlobalProperty("RESTApiDataSource.jdbcUrl") == null) { String url; if (dbUrl.contains("{database}")) { url = ln(dbUrl).formatByMap( map(e("database", "zstack_rest")) ); } else { url = String.format("%s/zstack_rest", dbUrl); } System.setProperty("RESTApiDataSource.jdbcUrl", url); logger.debug(String.format("default RESTApiDataSource.jdbcUrl to DB.url [%s]", url)); } } if (DatabaseGlobalProperty.DbUser != null) { if (getGlobalProperty("DbFacadeDataSource.user") == null) { System.setProperty("DbFacadeDataSource.user", DatabaseGlobalProperty.DbUser); logger.debug(String.format("default DbFacadeDataSource.user to DB.user [%s]", DatabaseGlobalProperty.DbUser)); } if (getGlobalProperty("RESTApiDataSource.user") == null) { System.setProperty("RESTApiDataSource.user", DatabaseGlobalProperty.DbUser); logger.debug(String.format("default RESTApiDataSource.user to DB.user [%s]", DatabaseGlobalProperty.DbUser)); } } if (DatabaseGlobalProperty.DbPassword != null) { if (getGlobalProperty("DbFacadeDataSource.password") == null) { System.setProperty("DbFacadeDataSource.password", DatabaseGlobalProperty.DbPassword); logger.debug(String.format("default DbFacadeDataSource.password to DB.password [%s]", DatabaseGlobalProperty.DbPassword)); } if (getGlobalProperty("RESTApiDataSource.password") == null) { System.setProperty("RESTApiDataSource.password", DatabaseGlobalProperty.DbPassword); logger.debug(String.format("default RESTApiDataSource.password to DB.password [%s]", DatabaseGlobalProperty.DbPassword)); } } if (DatabaseGlobalProperty.DbMaxIdleTime != null) { if (getGlobalProperty("DbFacadeDataSource.maxIdleTime") == null) { System.setProperty("DbFacadeDataSource.maxIdleTime", DatabaseGlobalProperty.DbMaxIdleTime); logger.debug(String.format("default DbFacadeDataSource.maxIdleTime to DB.maxIdleTime [%s]", DatabaseGlobalProperty.DbMaxIdleTime)); } if (getGlobalProperty("ExtraDataSource.maxIdleTime") == null) { System.setProperty("ExtraDataSource.maxIdleTime", DatabaseGlobalProperty.DbMaxIdleTime); logger.debug(String.format("default ExtraDataSource.maxIdleTime to DB.maxIdleTime [%s]", DatabaseGlobalProperty.DbMaxIdleTime)); } if (getGlobalProperty("RESTApiDataSource.maxIdleTime") == null) { System.setProperty("RESTApiDataSource.maxIdleTime", DatabaseGlobalProperty.DbMaxIdleTime); logger.debug(String.format("default RESTApiDataSource.maxIdleTime to DB.maxIdleTime [%s]", DatabaseGlobalProperty.DbMaxIdleTime)); } } if (DatabaseGlobalProperty.DbIdleConnectionTestPeriod != null) { if (getGlobalProperty("DbFacadeDataSource.idleConnectionTestPeriod") == null) { System.setProperty("DbFacadeDataSource.idleConnectionTestPeriod", DatabaseGlobalProperty.DbIdleConnectionTestPeriod); logger.debug(String.format("default DbFacadeDataSource.idleConnectionTestPeriod to DB.idleConnectionTestPeriod [%s]", DatabaseGlobalProperty.DbIdleConnectionTestPeriod)); } if (getGlobalProperty("ExtraDataSource.idleConnectionTestPeriod") == null) { System.setProperty("ExtraDataSource.idleConnectionTestPeriod", DatabaseGlobalProperty.DbIdleConnectionTestPeriod); logger.debug(String.format("default ExtraDataSource.idleConnectionTestPeriod to DB.idleConnectionTestPeriod [%s]", DatabaseGlobalProperty.DbIdleConnectionTestPeriod)); } if (getGlobalProperty("RESTApiDataSource.idleConnectionTestPeriod") == null) { System.setProperty("RESTApiDataSource.idleConnectionTestPeriod", 
DatabaseGlobalProperty.DbIdleConnectionTestPeriod); logger.debug(String.format("default RESTApiDataSource.idleConnectionTestPeriod to DB.idleConnectionTestPeriod [%s]", DatabaseGlobalProperty.DbIdleConnectionTestPeriod)); } } } static { FileInputStream in = null; try { Set<Class> baseResourceClasses = reflections.getTypesAnnotatedWith(BaseResource.class).stream() .filter(clz -> clz.isAnnotationPresent(BaseResource.class)).collect(Collectors.toSet()); for (Class clz : baseResourceClasses) { Set<Class> childResourceClasses = reflections.getSubTypesOf(clz); childResourceToBaseResourceMap.put(clz.getSimpleName(), clz.getSimpleName()); for (Class child : childResourceClasses) { childResourceToBaseResourceMap.put(child.getSimpleName(), clz.getSimpleName()); } } File globalPropertiesFile = PathUtil.findFileOnClassPath("zstack.properties", true); in = new FileInputStream(globalPropertiesFile); System.getProperties().load(in); // get ms ip should after global property setup msId = UUID.nameUUIDFromBytes(getManagementServerIp().getBytes()).toString().replaceAll("-", ""); collectDynamicObjectMetadata(); linkGlobalProperty(); prepareDefaultDbProperties(); callStaticInitMethods(); encryptedMethodsMap = getAllEncryptPassword(); writePidFile(); } catch (Throwable e) { logger.warn(String.format("unhandled exception when in Platform's static block, %s", e.getMessage()), e); new BootErrorLog().write(e.getMessage()); if (CoreGlobalProperty.EXIT_JVM_ON_BOOT_FAILURE) { System.exit(1); } else { throw new RuntimeException(e); } } finally { if (in != null) { try { in.close(); } catch (IOException e) { logger.warn(String.format("FileInputStream close IOException:%s", e.getMessage())); } } } } private static void collectDynamicObjectMetadata() { reflections.getSubTypesOf(DynamicObject.class).forEach(clz -> { DynamicObjectMetadata metadata = new DynamicObjectMetadata(); FieldUtils.getAllFields(clz).forEach(f -> { f.setAccessible(true); metadata.fields.put(f.getName(), f); }); Class p = clz; while (p != Object.class) { for (Method m : p.getDeclaredMethods()) { m.setAccessible(true); metadata.methods.put(m.getName(), m); } p = p.getSuperclass(); } dynamicObjectMetadata.put(clz, metadata); }); } public static String getBaseResourceType(String childResourceType) { String type = childResourceToBaseResourceMap.get(childResourceType); if (type == null) { type = childResourceType; } return type; } public static List<String> getAllChildrenResourceType(String baseResourceType) { return childResourceToBaseResourceMap.entrySet() .stream() .filter(map -> baseResourceType.equals(map.getValue())) .map(Map.Entry::getKey) .collect(Collectors.toList()); } private static Set<Method> getAllEncryptPassword() { Set<Method> encrypteds = reflections.getMethodsAnnotatedWith(ENCRYPT.class); for (Method encrypted: encrypteds) { logger.debug(String.format("found encrypted method[%s:%s]", encrypted.getDeclaringClass(), encrypted.getName())); } return encrypteds; } private static void callStaticInitMethods() throws InvocationTargetException, IllegalAccessException { List<Method> inits = new ArrayList<>(reflections.getMethodsAnnotatedWith(StaticInit.class)); inits.sort((o1, o2) -> { StaticInit a1 = o1.getAnnotation(StaticInit.class); StaticInit a2 = o2.getAnnotation(StaticInit.class); return a2.order() - a1.order(); }); for (Method init : inits) { if (!Modifier.isStatic(init.getModifiers())) { throw new CloudRuntimeException(String.format("the method[%s:%s] annotated by @StaticInit is not a static method", init.getDeclaringClass(), 
init.getName())); } logger.debug(String.format("calling static init method[%s:%s]", init.getDeclaringClass(), init.getName())); init.setAccessible(true); init.invoke(null); } } private static void initMessageSource() { locale = LocaleUtils.toLocale(CoreGlobalProperty.LOCALE); logger.debug(String.format("using locale[%s] for i18n logging messages", locale.toString())); if (loader == null) { throw new CloudRuntimeException("ComponentLoader is null. i18n has not been initialized, you call it too early"); } BeanFactory beanFactory = loader.getSpringIoc(); if (beanFactory == null) { throw new CloudRuntimeException("BeanFactory is null. i18n has not been initialized, you call it too early"); } if (!(beanFactory instanceof MessageSource)) { throw new CloudRuntimeException("BeanFactory is not a spring MessageSource. i18n cannot be used"); } messageSource = (MessageSource)beanFactory; } private static CloudBus bus; { Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() { @Override public void run() { if (bus != null) { bus.stop(); } } })); } public static String getGlobalProperty(String name) { return System.getProperty(name); } public static String getGlobalPropertyAnnotationName(Class clz, String fieldName) { try { String name = clz.getDeclaredField(fieldName).getAnnotation(GlobalProperty.class).name().trim(); /* remove the last character '.' */ return name.substring(0, name.length() - 1); } catch (Exception e) { return ""; } } public static Map<String, String> getGlobalPropertiesStartWith(String prefix) { Properties props = System.getProperties(); Enumeration e = props.propertyNames(); Map<String, String> ret = new HashMap<String, String>(); while (e.hasMoreElements()) { String key = (String) e.nextElement(); if (key.startsWith(prefix)) { ret.put(key, System.getProperty(key)); } } return ret; } public static ComponentLoader createComponentLoaderFromWebApplicationContext(WebApplicationContext webAppCtx) { assert loader == null; try { if (webAppCtx != null) { loader = new ComponentLoaderImpl(webAppCtx); } else { loader = new ComponentLoaderImpl(); } } catch (Exception e) { String err = "unable to create ComponentLoader"; logger.warn(e.getMessage(), e); throw new CloudRuntimeException(err); } loader.getPluginRegistry(); GlobalConfigFacade gcf = loader.getComponent(GlobalConfigFacade.class); if (gcf != null) { ((Component)gcf).start(); } bus = loader.getComponentNoExceptionWhenNotExisting(CloudBus.class); if (bus != null) { bus.start(); } initMessageSource(); return loader; } public static ComponentLoader getComponentLoader() { /* * This part cannot be moved to static block at the beginning. * Because component code loaded by Spring may call other functions in Platform which * causes the static block to be executed, which results in cycle initialization of ComponentLoaderImpl. 
*/ if (loader == null) { loader = createComponentLoaderFromWebApplicationContext(null); } return loader; } public static String getManagementServerId() { return msId; } public static <K extends Enum<K>, T extends Enum<T>> StateMachine<K, T> createStateMachine() { return new StateMachineImpl<K, T>(); } public static String getUuid() { return UUID.randomUUID().toString().replace("-", ""); } public static String getUuidFromBytes(byte[] name) { return UUID.nameUUIDFromBytes(name).toString().replace("-", ""); } public static String getManagementServerIp() { if (managementServerIp == null) { managementServerIp = getManagementServerIpInternal(); } return managementServerIp; } private static String getManagementServerCidrInternal() { String mgtIp = getManagementServerIp(); /*# ip add | grep 10.86.4.132 inet 10.86.4.132/23 brd 10.86.5.255 scope global br_eth0*/ /* because Linux.shell can not run command with '|', pares the output of ip address in java */ Linux.ShellResult ret = Linux.shell("ip -4 add"); for (String line : ret.getStdout().split("\\n")) { if (line.contains(mgtIp)) { line = line.trim(); try { return NetworkUtils.getNetworkAddressFromCidr(line.split(" ")[1]); } catch (RuntimeException e) { return null; } } } return null; } public static String getManagementServerCidr() { if (managementServerCidr == null) { managementServerCidr = getManagementServerCidrInternal(); } return managementServerCidr; } private static String getManagementServerIpInternal() { String ip = System.getProperty("management.server.ip"); if (ip != null) { logger.info(String.format("get management IP[%s] from Java property[management.server.ip]", ip)); return ip; } ip = System.getenv("ZSTACK_MANAGEMENT_SERVER_IP"); if (ip != null) { logger.info(String.format("get management IP[%s] from environment variable[ZSTACK_MANAGEMENT_SERVER_IP]", ip)); return ip; } Linux.ShellResult ret = Linux.shell("/sbin/ip route"); String defaultLine = null; for (String s : ret.getStdout().split("\n")) { if (s.contains("default via")) { defaultLine = s; break; } } String err = "cannot get management server ip of this machine. there are three ways to get the ip.\n1) search for 'management.server.ip' java property\n2) search for 'ZSTACK_MANAGEMENT_SERVER_IP' environment variable\n3) search for default route printed out by '/sbin/ip route'\nhowever, all above methods failed"; if (defaultLine == null) { throw new CloudRuntimeException(err); } try { Enumeration<NetworkInterface> nets = NetworkInterface.getNetworkInterfaces(); for (NetworkInterface iface : Collections.list(nets)) { String name = iface.getName(); if (defaultLine.contains(name)) { InetAddress ia = iface.getInetAddresses().nextElement(); ip = ia.getHostAddress(); break; } } } catch (SocketException e) { throw new CloudRuntimeException(e); } if (ip == null) { throw new CloudRuntimeException(err); } logger.info(String.format("get management IP[%s] from default route[/sbin/ip route]", ip)); return ip; } public static String toI18nString(String code, Object... args) { return toI18nString(code, null, args); } public static String toI18nString(String code, Locale l, List args) { return toI18nString(code, l, args.toArray(new Object[args.size()])); } private static String stringFormat(String fmt, Object...args) { if (args == null || args.length == 0) { return fmt; } else { return String.format(fmt, args); } } public static String toI18nString(String code, Locale l, Object...args) { l = l == null ? 
locale : l; try { String ret; if (args.length > 0) { ret = messageSource.getMessage(code, args, l); } else { ret = messageSource.getMessage(code, null, l); } // if the result is an empty string which means the string is not translated in the locale, // return the original string so users won't get a confusing, empty string return ret.isEmpty() ? stringFormat(code, args) : ret; } catch (NoSuchMessageException e) { return stringFormat(code, args); } } public static String i18n(String str, Object...args) { return toI18nString(str, args); } public static String i18n(String str, Map<String, String> args) { Map<String, String> nargs = new HashMap<>(); args.forEach((k, v) -> nargs.put(k, toI18nString(v))); return ln(toI18nString(str)).formatByMap(nargs); } public static boolean killProcess(int pid) { return killProcess(pid, 15); } public static boolean killProcess(int pid, Integer timeout) { timeout = timeout == null ? 30 : timeout; if (!TimeUtils.loopExecuteUntilTimeoutIgnoreExceptionAndReturn(timeout, 1, TimeUnit.SECONDS, () -> { ShellUtils.runAndReturn(String.format("kill %s", pid)); return !new ProcessFinder().processExists(pid); })) { logger.warn(String.format("cannot kill the process[PID:%s] after %s seconds, kill -9 it", pid, timeout)); ShellUtils.runAndReturn(String.format("kill -9 %s", pid)); } if (!TimeUtils.loopExecuteUntilTimeoutIgnoreExceptionAndReturn(5, 1, TimeUnit.SECONDS, () -> !new ProcessFinder().processExists(pid))) { logger.warn(String.format("FAILED TO KILL -9 THE PROCESS[PID:%s], THE KERNEL MUST HAVE SOMETHING RUN", pid)); return false; } else { return true; } } private synchronized static void insertLogError(String content, ErrorCodeElaboration err, boolean matched) { if (!CoreGlobalProperty.RECORD_TO_DB_ELABORATION) { return; } DatabaseFacade dbf = getComponentLoader().getComponent(DatabaseFacade.class); String md5Sum = StringDSL.getMd5Sum(content); ElaborationVO mvo = Q.New(ElaborationVO.class).eq(ElaborationVO_.md5sum, md5Sum).find(); if (mvo != null) { mvo.setDistance(err.getDistance()); mvo.setMatched(matched); mvo.setRepeats(mvo.getRepeats() + 1); dbf.updateAndRefresh(mvo); } else { mvo = new ElaborationVO(); mvo.setDistance(err.getDistance()); mvo.setRepeats(1L); mvo.setMatched(matched); mvo.setMd5sum(md5Sum); mvo.setErrorInfo(content); dbf.persistAndRefresh(mvo); } } private static ErrorCodeElaboration elaborate(String description) { ErrorCodeElaboration elaboration = StringSimilarity.findSimilary(description); if (elaboration != null) { String formatStr = elaboration.getFormatSrcError(); if (StringSimilarity.matched(elaboration)) { insertLogError(formatStr, elaboration, true); return elaboration; } else { insertLogError(formatStr, elaboration, false); return null; } } return null; } private static List<Enum> excludeCode = CollectionDSL.list(SysErrors.INTERNAL, SysErrors.OPERATION_ERROR, SysErrors.INVALID_ARGUMENT_ERROR, SysErrors.TIMEOUT); private static ErrorCodeElaboration elaborate(Enum errCode, String description, String details, Object...args) { ErrorCodeElaboration elaboration = StringSimilarity.findSimilary(details, args); if (elaboration != null) { String formatStr = elaboration.getFormatSrcError(); if (StringSimilarity.matched(elaboration)) { insertLogError(formatStr, elaboration, true); return elaboration; } else { if (excludeCode.contains(errCode)) { insertLogError(formatStr, elaboration, false); return null; } } } if (!excludeCode.contains(errCode)) { return elaborate(description); } else { return null; } } public static ErrorCode err(Enum errCode, 
String fmt, Object...args) { return err(errCode, null, fmt, args); } public static ErrorCode err(Enum errCode, ErrorCode cause, String fmt, Object...args) { ErrorFacade errf = getComponentLoader().getComponent(ErrorFacade.class); String details = null; if (fmt != null) { try { details = SysErrors.INTERNAL == errCode ? String.format(fmt, args) : toI18nString(fmt, args); } catch (Exception e) { logger.warn("exception happened when format error message"); logger.warn(e.getMessage()); details = fmt; } } ErrorCode result = errf.instantiateErrorCode(errCode, details, cause); // start to generate elaboration... if (CoreGlobalProperty.ENABLE_ELABORATION) { try { ErrorCode coreError = cause == null ? getCoreError(result) : getCoreError(cause); // use the core cause as elaboration if it existed if (coreError.getElaboration() != null) { result.setCost(coreError.getCost()); result.setElaboration(coreError.getElaboration()); result.setMessages(coreError.getMessages()); } else if (cause instanceof ErrorCodeList && ((ErrorCodeList) cause).getCauses() != null) { // suppose elaborations are existed in causes... ErrorCodeList errList = (ErrorCodeList)cause; String costs = null; String elas = null; ErrorCodeElaboration messages = null; for (ErrorCode c: errList.getCauses()) { ErrorCode lcError = getCoreError(c); if (lcError.getElaboration() != null) { costs = costs == null ? lcError.getCost() : addTwoCosts(costs, lcError.getCost()); elas = elas == null ? lcError.getElaboration() : String.join(",", elas, lcError.getElaboration()); messages = messages == null ? lcError.getMessages() : messages.addElaborationMessage(lcError.getMessages()); } } result.setCost(costs); result.setElaboration(elas); result.setMessages(messages); } if (result.getElaboration() == null) { long start = System.currentTimeMillis(); ErrorCodeElaboration ela = elaborate(errCode, result.getDescription(), fmt, args); if (ela != null) { long end = System.currentTimeMillis(); result.setCost((end - start) + "ms"); result.setElaboration(StringSimilarity.formatElaboration(ela, args)); result.setMessages(new ErrorCodeElaboration(ela.getCode(), ela.getMessage_en(), ela.getMessage_cn(), ela.getDistance(), ela.getMethod(), args)); } } } catch (Throwable e) { logger.warn("exception happened when found elaboration"); logger.warn(e.getMessage()); } } addErrorCounter(result); return result; } private static String addTwoCosts(String origin, String increase) { long c1 = Long.parseLong(origin.substring(0, origin.length() - 2).trim()); long c2 = Long.parseLong(increase.substring(0, increase.length() - 2).trim()); return (c1 + c2) + "ms"; } private static ErrorCode getCoreError(ErrorCode result) { if (result.getCause() == null) { return result; } else { return getCoreError(result.getCause()); } } public static ErrorCode inerr(String fmt, Object...args) { return err(SysErrors.INTERNAL, fmt, args); } // format error code from expand components public static ErrorCode experr(String fmt, String err, Object...args) { return operr(fmt, err, args); } public static ErrorCode operr(String fmt, Object...args) { return err(SysErrors.OPERATION_ERROR, fmt, args); } public static ErrorCode operr(ErrorCode cause, String fmt, Object...args) { return err(SysErrors.OPERATION_ERROR, cause, fmt, args); } public static ErrorCode argerr(String fmt, Object...args) { return err(SysErrors.INVALID_ARGUMENT_ERROR, fmt, args); } public static ErrorCode touterr(String fmt, Object...args) { return err(SysErrors.TIMEOUT, fmt, args); } public static ErrorCode touterr(ErrorCode cause, String 
fmt, Object...args) { return err(SysErrors.TIMEOUT, cause, fmt, args); } public static ErrorCode ioerr(String fmt, Object...args) { return err(SysErrors.IO_ERROR, fmt, args); } public static ErrorCode httperr(String fmt, Object...args) { return err(SysErrors.HTTP_ERROR, fmt, args); } public static Function<Supplier, Object> functionForMockTestObject = (Supplier t) -> t.get(); // This is to make objects created by keyword 'new' mockable // developers call this method as a factory method like: // // JavaMailSenderImpl sender = Platform.New(()-> new JavaMailSenderImpl()); // // in unit tests, we can replace functionForMockTestObject with a function which returns a mocked // object, for example: // // Platform.functionForMockTestObject = (Supplier t) -? { // Object obj = t.get(); // return Mockito.spy(obj); // } public static <T> T New(Supplier supplier) { return (T) functionForMockTestObject.apply(supplier); } public static final String EXIT_REASON = "zstack.quit.reason"; public static final String SKIP_STOP = "skip.mn.exit"; public static void exit(String reason) { new BootErrorLog().write(reason); System.setProperty(EXIT_REASON, reason); System.exit(1); } public static String randomAlphanumeric(int count) { return RandomStringUtils.randomAlphanumeric(count); } public static boolean isAfterManagementNodeStart(Timestamp ts) { RuntimeMXBean bean = ManagementFactory.getRuntimeMXBean(); Timestamp startMnTime = new Timestamp(bean.getStartTime()); return ts.after(startMnTime); } public static void addErrorCounter(ErrorCode code) { errorCounter.compute(code.getCode().split("\\.")[0], (k, v) -> v == null ? 1 : v ++); } public static Map<String, Double> getErrorCounter() { return errorCounter; } }
[ "\"ZSTACK_MANAGEMENT_SERVER_IP\"" ]
[]
[ "ZSTACK_MANAGEMENT_SERVER_IP" ]
[]
["ZSTACK_MANAGEMENT_SERVER_IP"]
java
1
0
src/shotgunEventDaemon.py
#!/usr/bin/env python # # Init file for Shotgun event daemon # # chkconfig: 345 99 00 # description: Shotgun event daemon # ### BEGIN INIT INFO # Provides: shotgunEvent # Required-Start: $network # Should-Start: $remote_fs # Required-Stop: $network # Should-Stop: $remote_fs # Default-Start: 2 3 4 5 # Short-Description: Shotgun event daemon # Description: Shotgun event daemon ### END INIT INFO """ For an overview of shotgunEvents, please see raw documentation in the docs folder or an html compiled version at: http://shotgunsoftware.github.com/shotgunEvents """ from __future__ import print_function __version__ = "1.0" __version_info__ = (1, 0) # Suppress the deprecation warning about imp until we get around to replacing it import warnings with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=DeprecationWarning) import imp import datetime import logging import logging.handlers import os import pprint import socket import sys import time import traceback from six.moves import configparser import six.moves.cPickle as pickle from distutils.version import StrictVersion if sys.platform == "win32": import win32serviceutil import win32service import win32event import servicemanager import daemonizer import shotgun_api3 as sg from shotgun_api3.lib.sgtimezone import SgTimezone SG_TIMEZONE = SgTimezone() CURRENT_PYTHON_VERSION = StrictVersion(sys.version.split()[0]) PYTHON_26 = StrictVersion("2.6") PYTHON_27 = StrictVersion("2.7") EMAIL_FORMAT_STRING = """Time: %(asctime)s Logger: %(name)s Path: %(pathname)s Function: %(funcName)s Line: %(lineno)d %(message)s""" def _setFilePathOnLogger(logger, path): # Remove any previous handler. _removeHandlersFromLogger(logger, logging.handlers.TimedRotatingFileHandler) # Add the file handler handler = logging.handlers.TimedRotatingFileHandler( path, "midnight", backupCount=10 ) handler.setFormatter( logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") ) logger.addHandler(handler) def _removeHandlersFromLogger(logger, handlerTypes=None): """ Remove all handlers or handlers of a specified type from a logger. @param logger: The logger who's handlers should be processed. @type logger: A logging.Logger object @param handlerTypes: A type of handler or list/tuple of types of handlers that should be removed from the logger. If I{None}, all handlers are removed. @type handlerTypes: L{None}, a logging.Handler subclass or I{list}/I{tuple} of logging.Handler subclasses. """ for handler in logger.handlers: if handlerTypes is None or isinstance(handler, handlerTypes): logger.removeHandler(handler) def _addMailHandlerToLogger( logger, smtpServer, fromAddr, toAddrs, emailSubject, username=None, password=None, secure=None, ): """ Configure a logger with a handler that sends emails to specified addresses. The format of the email is defined by L{LogFactory.EMAIL_FORMAT_STRING}. @note: Any SMTPHandler already connected to the logger will be removed. @param logger: The logger to configure @type logger: A logging.Logger instance @param toAddrs: The addresses to send the email to. @type toAddrs: A list of email addresses that will be passed on to the SMTPHandler. 
""" if smtpServer and fromAddr and toAddrs and emailSubject: mailHandler = CustomSMTPHandler( smtpServer, fromAddr, toAddrs, emailSubject, (username, password), secure ) mailHandler.setLevel(logging.ERROR) mailFormatter = logging.Formatter(EMAIL_FORMAT_STRING) mailHandler.setFormatter(mailFormatter) logger.addHandler(mailHandler) class Config(configparser.SafeConfigParser): def __init__(self, path): configparser.SafeConfigParser.__init__(self, os.environ) self.read(path) def getShotgunURL(self): return self.get("shotgun", "server") def getEngineScriptName(self): return self.get("shotgun", "name") def getEngineScriptKey(self): return self.get("shotgun", "key") def getEngineProxyServer(self): try: proxy_server = self.get("shotgun", "proxy_server").strip() if not proxy_server: return None return proxy_server except configparser.NoOptionError: return None def getEventIdFile(self): return self.get("daemon", "eventIdFile") def getEnginePIDFile(self): return self.get("daemon", "pidFile") def getPluginPaths(self): return [s.strip() for s in self.get("plugins", "paths").split(",")] def getSMTPServer(self): return self.get("emails", "server") def getSMTPPort(self): if self.has_option("emails", "port"): return self.getint("emails", "port") return 25 def getFromAddr(self): return self.get("emails", "from") def getToAddrs(self): return [s.strip() for s in self.get("emails", "to").split(",")] def getEmailSubject(self): return self.get("emails", "subject") def getEmailUsername(self): if self.has_option("emails", "username"): return self.get("emails", "username") return None def getEmailPassword(self): if self.has_option("emails", "password"): return self.get("emails", "password") return None def getSecureSMTP(self): if self.has_option("emails", "useTLS"): return self.getboolean("emails", "useTLS") or False return False def getLogMode(self): return self.getint("daemon", "logMode") def getLogLevel(self): return self.getint("daemon", "logging") def getMaxEventBatchSize(self): if self.has_option("daemon", "max_event_batch_size"): return self.getint("daemon", "max_event_batch_size") return 500 def getLogFile(self, filename=None): if filename is None: if self.has_option("daemon", "logFile"): filename = self.get("daemon", "logFile") else: raise ConfigError("The config file has no logFile option.") if self.has_option("daemon", "logPath"): path = self.get("daemon", "logPath") if not os.path.exists(path): os.makedirs(path) elif not os.path.isdir(path): raise ConfigError( "The logPath value in the config should point to a directory." ) path = os.path.join(path, filename) else: path = filename return path def getTimingLogFile(self): if ( not self.has_option("daemon", "timing_log") or self.get("daemon", "timing_log") != "on" ): return None return self.getLogFile() + ".timing" class Engine(object): """ The engine holds the main loop of event processing. 
""" def __init__(self, configPath): """ """ self._continue = True self._eventIdData = {} # Read/parse the config self.config = Config(configPath) # Get config values self._pluginCollections = [ PluginCollection(self, s) for s in self.config.getPluginPaths() ] self._sg = sg.Shotgun( self.config.getShotgunURL(), self.config.getEngineScriptName(), self.config.getEngineScriptKey(), http_proxy=self.config.getEngineProxyServer(), ) self._max_conn_retries = self.config.getint("daemon", "max_conn_retries") self._conn_retry_sleep = self.config.getint("daemon", "conn_retry_sleep") self._fetch_interval = self.config.getint("daemon", "fetch_interval") self._use_session_uuid = self.config.getboolean("shotgun", "use_session_uuid") # Setup the loggers for the main engine if self.config.getLogMode() == 0: # Set the root logger for file output. rootLogger = logging.getLogger() rootLogger.config = self.config _setFilePathOnLogger(rootLogger, self.config.getLogFile()) print(self.config.getLogFile()) # Set the engine logger for email output. self.log = logging.getLogger("engine") self.setEmailsOnLogger(self.log, True) else: # Set the engine logger for file and email output. self.log = logging.getLogger("engine") self.log.config = self.config _setFilePathOnLogger(self.log, self.config.getLogFile()) self.setEmailsOnLogger(self.log, True) self.log.setLevel(self.config.getLogLevel()) # Setup the timing log file timing_log_filename = self.config.getTimingLogFile() if timing_log_filename: self.timing_logger = logging.getLogger("timing") self.timing_logger.setLevel(self.config.getLogLevel()) _setFilePathOnLogger(self.timing_logger, timing_log_filename) else: self.timing_logger = None super(Engine, self).__init__() def setEmailsOnLogger(self, logger, emails): # Configure the logger for email output _removeHandlersFromLogger(logger, logging.handlers.SMTPHandler) if emails is False: return smtpServer = self.config.getSMTPServer() smtpPort = self.config.getSMTPPort() fromAddr = self.config.getFromAddr() emailSubject = self.config.getEmailSubject() username = self.config.getEmailUsername() password = self.config.getEmailPassword() if self.config.getSecureSMTP(): secure = (None, None) else: secure = None if emails is True: toAddrs = self.config.getToAddrs() elif isinstance(emails, (list, tuple)): toAddrs = emails else: msg = "Argument emails should be True to use the default addresses, False to not send any emails or a list of recipient addresses. Got %s." raise ValueError(msg % type(emails)) _addMailHandlerToLogger( logger, (smtpServer, smtpPort), fromAddr, toAddrs, emailSubject, username, password, secure, ) def start(self): """ Start the processing of events. The last processed id is loaded up from persistent storage on disk and the main loop is started. """ # TODO: Take value from config socket.setdefaulttimeout(60) # Notify which version of shotgun api we are using self.log.info("Using SG Python API version %s" % sg.__version__) try: for collection in self._pluginCollections: collection.load() self._loadEventIdData() self._mainLoop() except KeyboardInterrupt: self.log.warning("Keyboard interrupt. Cleaning up...") except Exception as err: msg = "Crash!!!!! Unexpected error (%s) in main loop.\n\n%s" self.log.critical(msg, type(err), traceback.format_exc(err)) def _loadEventIdData(self): """ Load the last processed event id from the disk If no event has ever been processed or if the eventIdFile has been deleted from disk, no id will be recoverable. 
In this case, we will try contacting Shotgun to get the latest event's id and we'll start processing from there. """ eventIdFile = self.config.getEventIdFile() if eventIdFile and os.path.exists(eventIdFile): try: fh = open(eventIdFile, "rb") try: self._eventIdData = pickle.load(fh) # Provide event id info to the plugin collections. Once # they've figured out what to do with it, ask them for their # last processed id. noStateCollections = [] for collection in self._pluginCollections: state = self._eventIdData.get(collection.path) if state: collection.setState(state) else: noStateCollections.append(collection) # If we don't have a state it means there's no match # in the id file. First we'll search to see the latest id a # matching plugin name has elsewhere in the id file. We do # this as a fallback in case the plugins directory has been # moved. If there's no match, use the latest event id # in Shotgun. if noStateCollections: maxPluginStates = {} for collection in self._eventIdData.values(): for pluginName, pluginState in collection.items(): if pluginName in maxPluginStates.keys(): if pluginState[0] > maxPluginStates[pluginName][0]: maxPluginStates[pluginName] = pluginState else: maxPluginStates[pluginName] = pluginState lastEventId = self._getLastEventIdFromDatabase() for collection in noStateCollections: state = collection.getState() for pluginName in state.keys(): if pluginName in maxPluginStates.keys(): state[pluginName] = maxPluginStates[pluginName] else: state[pluginName] = lastEventId collection.setState(state) except pickle.UnpicklingError: fh.close() # Backwards compatibility: # Reopen the file to try to read an old-style int fh = open(eventIdFile, "rb") line = fh.readline().strip() if line.isdigit(): # The _loadEventIdData got an old-style id file containing a single # int which is the last id properly processed. lastEventId = int(line) self.log.debug( "Read last event id (%d) from file.", lastEventId ) for collection in self._pluginCollections: collection.setState(lastEventId) fh.close() except OSError as err: raise EventDaemonError( "Could not load event id from file.\n\n%s" % traceback.format_exc(err) ) else: # No id file? # Get the event data from the database. lastEventId = self._getLastEventIdFromDatabase() if lastEventId: for collection in self._pluginCollections: collection.setState(lastEventId) self._saveEventIdData() def _getLastEventIdFromDatabase(self): conn_attempts = 0 lastEventId = None while lastEventId is None: order = [{"column": "id", "direction": "desc"}] try: result = self._sg.find_one( "EventLogEntry", filters=[], fields=["id"], order=order ) except (sg.ProtocolError, sg.ResponseError, socket.error) as err: conn_attempts = self._checkConnectionAttempts(conn_attempts, str(err)) except Exception as err: msg = "Unknown error: %s" % str(err) conn_attempts = self._checkConnectionAttempts(conn_attempts, msg) else: lastEventId = result["id"] self.log.info("Last event id (%d) from the SG database.", lastEventId) return lastEventId def _mainLoop(self): """ Run the event processing loop. General behavior: - Load plugins from disk - see L{load} method. - Get new events from Shotgun - Loop through events - Loop through each plugin - Loop through each callback - Send the callback an event - Once all callbacks are done in all plugins, save the eventId - Go to the next event - Once all events are processed, wait for the defined fetch interval time and start over. Caveats: - If a plugin is deemed "inactive" (an error occured during registration), skip it. 
- If a callback is deemed "inactive" (an error occured during callback execution), skip it. - Each time through the loop, if the pidFile is gone, stop. """ self.log.debug("Starting the event processing loop.") while self._continue: # Process events events = self._getNewEvents() for event in events: for collection in self._pluginCollections: collection.process(event) self._saveEventIdData() # if we're lagging behind Shotgun, we received a full batch of events # skip the sleep() call in this case if len(events) < self.config.getMaxEventBatchSize(): time.sleep(self._fetch_interval) # Reload plugins for collection in self._pluginCollections: collection.load() # Make sure that newly loaded events have proper state. self._loadEventIdData() self.log.debug("Shuting down event processing loop.") def stop(self): self._continue = False def _getNewEvents(self): """ Fetch new events from Shotgun. @return: Recent events that need to be processed by the engine. @rtype: I{list} of Shotgun event dictionaries. """ nextEventId = None for newId in [ coll.getNextUnprocessedEventId() for coll in self._pluginCollections ]: if newId is not None and (nextEventId is None or newId < nextEventId): nextEventId = newId if nextEventId is not None: filters = [["id", "greater_than", nextEventId - 1]] fields = [ "id", "event_type", "attribute_name", "meta", "entity", "user", "project", "session_uuid", "created_at", ] order = [{"column": "id", "direction": "asc"}] conn_attempts = 0 while True: try: events = self._sg.find( "EventLogEntry", filters, fields, order, limit=self.config.getMaxEventBatchSize(), ) if events: self.log.debug( "Got %d events: %d to %d.", len(events), events[0]["id"], events[-1]["id"], ) return events except (sg.ProtocolError, sg.ResponseError, socket.error) as err: conn_attempts = self._checkConnectionAttempts( conn_attempts, str(err) ) except Exception as err: msg = "Unknown error: %s" % str(err) conn_attempts = self._checkConnectionAttempts(conn_attempts, msg) return [] def _saveEventIdData(self): """ Save an event Id to persistant storage. Next time the engine is started it will try to read the event id from this location to know at which event it should start processing. """ eventIdFile = self.config.getEventIdFile() if eventIdFile is not None: for collection in self._pluginCollections: self._eventIdData[collection.path] = collection.getState() for colPath, state in self._eventIdData.items(): if state: try: with open(eventIdFile, "wb") as fh: # Use protocol 2 so it can also be loaded in Python 2 pickle.dump(self._eventIdData, fh, protocol=2) except OSError as err: self.log.error( "Can not write event id data to %s.\n\n%s", eventIdFile, traceback.format_exc(err), ) break else: self.log.warning("No state was found. Not saving to disk.") def _checkConnectionAttempts(self, conn_attempts, msg): conn_attempts += 1 if conn_attempts == self._max_conn_retries: self.log.error( "Unable to connect to SG (attempt %s of %s): %s", conn_attempts, self._max_conn_retries, msg, ) conn_attempts = 0 time.sleep(self._conn_retry_sleep) else: self.log.warning( "Unable to connect to SG (attempt %s of %s): %s", conn_attempts, self._max_conn_retries, msg, ) return conn_attempts class PluginCollection(object): """ A group of plugin files in a location on the disk. 
""" def __init__(self, engine, path): if not os.path.isdir(path): raise ValueError("Invalid path: %s" % path) self._engine = engine self.path = path self._plugins = {} self._stateData = {} def setState(self, state): if isinstance(state, int): for plugin in self: plugin.setState(state) self._stateData[plugin.getName()] = plugin.getState() else: self._stateData = state for plugin in self: pluginState = self._stateData.get(plugin.getName()) if pluginState: plugin.setState(pluginState) def getState(self): for plugin in self: self._stateData[plugin.getName()] = plugin.getState() return self._stateData def getNextUnprocessedEventId(self): eId = None for plugin in self: if not plugin.isActive(): continue newId = plugin.getNextUnprocessedEventId() if newId is not None and (eId is None or newId < eId): eId = newId return eId def process(self, event): for plugin in self: if plugin.isActive(): plugin.process(event) else: plugin.logger.debug("Skipping: inactive.") def load(self): """ Load plugins from disk. General behavior: - Loop on all paths. - Find all valid .py plugin files. - Loop on all plugin files. - For any new plugins, load them, otherwise, refresh them. """ newPlugins = {} for basename in os.listdir(self.path): if not basename.endswith(".py") or basename.startswith("."): continue if basename in self._plugins: newPlugins[basename] = self._plugins[basename] else: newPlugins[basename] = Plugin( self._engine, os.path.join(self.path, basename) ) newPlugins[basename].load() self._plugins = newPlugins def __iter__(self): for basename in sorted(self._plugins.keys()): yield self._plugins[basename] class Plugin(object): """ The plugin class represents a file on disk which contains one or more callbacks. """ def __init__(self, engine, path): """ @param engine: The engine that instanciated this plugin. @type engine: L{Engine} @param path: The path of the plugin file to load. @type path: I{str} @raise ValueError: If the path to the plugin is not a valid file. """ self._engine = engine self._path = path if not os.path.isfile(path): raise ValueError("The path to the plugin is not a valid file - %s." % path) self._pluginName = os.path.splitext(os.path.split(self._path)[1])[0] self._active = True self._callbacks = [] self._mtime = None self._lastEventId = None self._backlog = {} # Setup the plugin's logger self.logger = logging.getLogger("plugin." + self.getName()) self.logger.config = self._engine.config self._engine.setEmailsOnLogger(self.logger, True) self.logger.setLevel(self._engine.config.getLogLevel()) if self._engine.config.getLogMode() == 1: _setFilePathOnLogger( self.logger, self._engine.config.getLogFile("plugin." + self.getName()) ) def getName(self): return self._pluginName def setState(self, state): if isinstance(state, int): self._lastEventId = state elif isinstance(state, tuple): self._lastEventId, self._backlog = state else: raise ValueError("Unknown state type: %s." % type(state)) def getState(self): return (self._lastEventId, self._backlog) def getNextUnprocessedEventId(self): if self._lastEventId: nextId = self._lastEventId + 1 else: nextId = None now = datetime.datetime.now() for k in list(self._backlog): v = self._backlog[k] if v < now: self.logger.warning("Timeout elapsed on backlog event id %d.", k) del self._backlog[k] elif nextId is None or k < nextId: nextId = k return nextId def isActive(self): """ Is the current plugin active. Should it's callbacks be run? @return: True if this plugin's callbacks should be run, False otherwise. 
@rtype: I{bool} """ return self._active def setEmails(self, *emails): """ Set the email addresses to whom this plugin should send errors. @param emails: See L{LogFactory.getLogger}'s emails argument for info. @type emails: A I{list}/I{tuple} of email addresses or I{bool}. """ self._engine.setEmailsOnLogger(self.logger, emails) def load(self): """ Load/Reload the plugin and all its callbacks. If a plugin has never been loaded it will be loaded normally. If the plugin has been loaded before it will be reloaded only if the file has been modified on disk. In this event callbacks will all be cleared and reloaded. General behavior: - Try to load the source of the plugin. - Try to find a function called registerCallbacks in the file. - Try to run the registration function. At every step along the way, if any error occurs the whole plugin will be deactivated and the function will return. """ # Check file mtime mtime = os.path.getmtime(self._path) if self._mtime is None: self._engine.log.info("Loading plugin at %s" % self._path) elif self._mtime < mtime: self._engine.log.info("Reloading plugin at %s" % self._path) else: # The mtime of file is equal or older. We don't need to do anything. return # Reset values self._mtime = mtime self._callbacks = [] self._active = True try: plugin = imp.load_source(self._pluginName, self._path) except: self._active = False self.logger.error( "Could not load the plugin at %s.\n\n%s", self._path, traceback.format_exc(), ) return regFunc = getattr(plugin, "registerCallbacks", None) if callable(regFunc): try: regFunc(Registrar(self)) except: self._engine.log.critical( "Error running register callback function from plugin at %s.\n\n%s", self._path, traceback.format_exc(), ) self._active = False else: self._engine.log.critical( "Did not find a registerCallbacks function in plugin at %s.", self._path ) self._active = False def registerCallback( self, sgScriptName, sgScriptKey, callback, matchEvents=None, args=None, stopOnError=True, ): """ Register a callback in the plugin. """ global sg sgConnection = sg.Shotgun( self._engine.config.getShotgunURL(), sgScriptName, sgScriptKey, http_proxy=self._engine.config.getEngineProxyServer(), ) self._callbacks.append( Callback( callback, self, self._engine, sgConnection, matchEvents, args, stopOnError, ) ) def process(self, event): if event["id"] in self._backlog: if self._process(event): self.logger.info("Processed id %d from backlog." % event["id"]) del self._backlog[event["id"]] self._updateLastEventId(event) elif self._lastEventId is not None and event["id"] <= self._lastEventId: msg = "Event %d is too old. Last event processed was (%d)." self.logger.debug(msg, event["id"], self._lastEventId) else: if self._process(event): self._updateLastEventId(event) return self._active def _process(self, event): for callback in self: if callback.isActive(): if callback.canProcess(event): msg = "Dispatching event %d to callback %s." self.logger.debug(msg, event["id"], str(callback)) if not callback.process(event): # A callback in the plugin failed. Deactivate the whole # plugin. self._active = False break else: msg = "Skipping inactive callback %s in plugin." 
self.logger.debug(msg, str(callback)) return self._active def _updateLastEventId(self, event): BACKLOG_TIMEOUT = ( 5 # time in minutes after which we consider a pending event won't happen ) if self._lastEventId is not None and event["id"] > self._lastEventId + 1: event_date = event["created_at"].replace(tzinfo=None) if datetime.datetime.now() > ( event_date + datetime.timedelta(minutes=BACKLOG_TIMEOUT) ): # the event we've just processed happened more than BACKLOG_TIMEOUT minutes ago so any event # with a lower id should have shown up in the EventLog by now if it actually happened if event["id"] == self._lastEventId + 2: self.logger.info( "Event %d never happened - ignoring.", self._lastEventId + 1 ) else: self.logger.info( "Events %d-%d never happened - ignoring.", self._lastEventId + 1, event["id"] - 1, ) else: # in this case, we want to add the missing events to the backlog as they could show up in the # EventLog within BACKLOG_TIMEOUT minutes, during which we'll keep asking for the same range, # waiting for them to show up until they expire expiration = datetime.datetime.now() + datetime.timedelta( minutes=BACKLOG_TIMEOUT ) for skippedId in range(self._lastEventId + 1, event["id"]): self.logger.info("Adding event id %d to backlog.", skippedId) self._backlog[skippedId] = expiration self._lastEventId = event["id"] def __iter__(self): """ A plugin is iterable and will iterate over all its L{Callback} objects. """ return self._callbacks.__iter__() def __str__(self): """ Provide the name of the plugin when it is cast as a string. @return: The name of the plugin. @rtype: I{str} """ return self.getName() class Registrar(object): """ See public API docs in docs folder. """ def __init__(self, plugin): """ Wrap a plugin so it can be passed to a user. """ self._plugin = plugin self._allowed = ["logger", "setEmails", "registerCallback"] def getLogger(self): """ Get the logger for this plugin. @return: The logger configured for this plugin. @rtype: L{logging.Logger} """ # TODO: Fix this ugly protected member access return self.logger def __getattr__(self, name): if name in self._allowed: return getattr(self._plugin, name) raise AttributeError( "type object '%s' has no attribute '%s'" % (type(self).__name__, name) ) class Callback(object): """ A part of a plugin that can be called to process a Shotgun event. """ def __init__( self, callback, plugin, engine, shotgun, matchEvents=None, args=None, stopOnError=True, ): """ @param callback: The function to run when a Shotgun event occurs. @type callback: A function object. @param engine: The engine that will dispatch to this callback. @type engine: L{Engine}. @param shotgun: The Shotgun instance that will be used to communicate with your Shotgun server. @type shotgun: L{sg.Shotgun} @param matchEvents: The event filter to match events against before invoking callback. @type matchEvents: dict @param args: Any data structure you would like to be passed to your callback function. Defaults to None. @type args: Any object. @raise TypeError: If the callback is not a callable object. """ if not callable(callback): raise TypeError( "The callback must be a callable object (function, method or callable class instance)."
) self._name = None self._shotgun = shotgun self._callback = callback self._engine = engine self._logger = None self._matchEvents = matchEvents self._args = args self._stopOnError = stopOnError self._active = True # Find a name for this object if hasattr(callback, "__name__"): self._name = callback.__name__ elif hasattr(callback, "__class__") and hasattr(callback, "__call__"): self._name = "%s_%s" % (callback.__class__.__name__, hex(id(callback))) else: raise ValueError( "registerCallback should be called with a function or a callable object instance as callback argument." ) # TODO: Get rid of this protected member access self._logger = logging.getLogger(plugin.logger.name + "." + self._name) self._logger.config = self._engine.config def canProcess(self, event): if not self._matchEvents: return True if "*" in self._matchEvents: eventType = "*" else: eventType = event["event_type"] if eventType not in self._matchEvents: return False attributes = self._matchEvents[eventType] if attributes is None or "*" in attributes: return True if event["attribute_name"] and event["attribute_name"] in attributes: return True return False def process(self, event): """ Process an event with the callback object supplied on initialization. If an error occurs, it will be logged appropriately and the callback will be deactivated. @param event: The Shotgun event to process. @type event: I{dict} """ # set session_uuid for UI updates if self._engine._use_session_uuid: self._shotgun.set_session_uuid(event["session_uuid"]) if self._engine.timing_logger: start_time = datetime.datetime.now(SG_TIMEZONE.local) try: self._callback(self._shotgun, self._logger, event, self._args) error = False except: error = True # Get the local variables of the frame of our plugin tb = sys.exc_info()[2] stack = [] while tb: stack.append(tb.tb_frame) tb = tb.tb_next msg = "An error occured processing an event.\n\n%s\n\nLocal variables at outer most frame in plugin:\n\n%s" self._logger.critical( msg, traceback.format_exc(), pprint.pformat(stack[1].f_locals) ) if self._stopOnError: self._active = False if self._engine.timing_logger: callback_name = self._logger.name.replace("plugin.", "") end_time = datetime.datetime.now(SG_TIMEZONE.local) duration = self._prettyTimeDeltaFormat(end_time - start_time) delay = self._prettyTimeDeltaFormat(start_time - event["created_at"]) msg_format = "event_id=%d created_at=%s callback=%s start=%s end=%s duration=%s error=%s delay=%s" data = [ event["id"], event["created_at"].isoformat(), callback_name, start_time.isoformat(), end_time.isoformat(), duration, str(error), delay, ] self._engine.timing_logger.info(msg_format, *data) return self._active def _prettyTimeDeltaFormat(self, time_delta): days, remainder = divmod(time_delta.total_seconds(), 86400) hours, remainder = divmod(remainder, 3600) minutes, seconds = divmod(remainder, 60) return "%02d:%02d:%02d:%02d.%06d" % ( days, hours, minutes, seconds, time_delta.microseconds, ) def isActive(self): """ Check if this callback is active, i.e. if events should be passed to it for processing. @return: True if this callback should process events, False otherwise. @rtype: I{bool} """ return self._active def __str__(self): """ The name of the callback. @return: The name of the callback @rtype: I{str} """ return self._name class CustomSMTPHandler(logging.handlers.SMTPHandler): """ A custom SMTPHandler subclass that will adapt it's subject depending on the error severity. 
""" LEVEL_SUBJECTS = { logging.ERROR: "ERROR - SG event daemon.", logging.CRITICAL: "CRITICAL - SG event daemon.", } def __init__( self, smtpServer, fromAddr, toAddrs, emailSubject, credentials=None, secure=None ): args = [smtpServer, fromAddr, toAddrs, emailSubject, credentials] if credentials: # Python 2.7 implemented the secure argument if CURRENT_PYTHON_VERSION >= PYTHON_27: args.append(secure) else: self.secure = secure logging.handlers.SMTPHandler.__init__(self, *args) def getSubject(self, record): subject = logging.handlers.SMTPHandler.getSubject(self, record) if record.levelno in self.LEVEL_SUBJECTS: return subject + " " + self.LEVEL_SUBJECTS[record.levelno] return subject def emit(self, record): """ Emit a record. Format the record and send it to the specified addressees. """ # Mostly copied from Python 2.7 implementation. try: import smtplib from email.utils import formatdate port = self.mailport if not port: port = smtplib.SMTP_PORT smtp = smtplib.SMTP(self.mailhost, port) msg = self.format(record) msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % ( self.fromaddr, ",".join(self.toaddrs), self.getSubject(record), formatdate(), msg, ) if self.username: if self.secure is not None: smtp.ehlo() smtp.starttls(*self.secure) smtp.ehlo() smtp.login(self.username, self.password) smtp.sendmail(self.fromaddr, self.toaddrs, msg) smtp.close() except (KeyboardInterrupt, SystemExit): raise except: self.handleError(record) class EventDaemonError(Exception): """ Base error for the Shotgun event system. """ pass class ConfigError(EventDaemonError): """ Used when an error is detected in the config file. """ pass if sys.platform == "win32": class WindowsService(win32serviceutil.ServiceFramework): """ Windows service wrapper """ _svc_name_ = "ShotgunEventDaemon" _svc_display_name_ = "Shotgun Event Handler" def __init__(self, args): win32serviceutil.ServiceFramework.__init__(self, args) self.hWaitStop = win32event.CreateEvent(None, 0, 0, None) self._engine = Engine(_getConfigPath()) def SvcStop(self): """ Stop the Windows service. """ self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING) win32event.SetEvent(self.hWaitStop) self._engine.stop() def SvcDoRun(self): """ Start the Windows service. """ servicemanager.LogMsg( servicemanager.EVENTLOG_INFORMATION_TYPE, servicemanager.PYS_SERVICE_STARTED, (self._svc_name_, ""), ) self.main() def main(self): """ Primary Windows entry point """ self._engine.start() class LinuxDaemon(daemonizer.Daemon): """ Linux Daemon wrapper or wrapper used for foreground operation on Windows """ def __init__(self): self._engine = Engine(_getConfigPath()) super(LinuxDaemon, self).__init__( "shotgunEvent", self._engine.config.getEnginePIDFile() ) def start(self, daemonize=True): if not daemonize: # Setup the stdout logger handler = logging.StreamHandler() handler.setFormatter( logging.Formatter("%(levelname)s:%(name)s:%(message)s") ) logging.getLogger().addHandler(handler) super(LinuxDaemon, self).start(daemonize) def _run(self): """ Start the engine's main loop """ self._engine.start() def _cleanup(self): self._engine.stop() def main(): """ """ if CURRENT_PYTHON_VERSION <= PYTHON_26: print( "Python 2.5 and older is not supported anymore. Please use Python 2.6 or newer." 
) return 3 action = None if len(sys.argv) > 1: action = sys.argv[1] if sys.platform == "win32" and action != "foreground": win32serviceutil.HandleCommandLine(WindowsService) return 0 if action: daemon = LinuxDaemon() # Find the function to call on the daemon and call it func = getattr(daemon, action, None) if action[:1] != "_" and func is not None: func() return 0 print("Unknown command: %s" % action) print("usage: %s start|stop|restart|foreground" % sys.argv[0]) return 2 def _getConfigPath(): """ Get the path of the shotgunEventDaemon configuration file. """ paths = ["/etc", os.path.dirname(__file__)] # Get the current path of the daemon script scriptPath = sys.argv[0] if scriptPath != "" and scriptPath != "-c": # Make absolute path and eliminate any symlinks if any. scriptPath = os.path.abspath(scriptPath) scriptPath = os.path.realpath(scriptPath) # Add the script's directory to the paths we'll search for the config. paths[:0] = [os.path.dirname(scriptPath)] # Search for a config file. for path in paths: path = os.path.join(path, "shotgunEventDaemon.conf") if os.path.exists(path): return path # No config file was found raise EventDaemonError("Config path not found, searched %s" % ", ".join(paths)) if __name__ == "__main__": sys.exit(main())
[]
[]
[]
[]
[]
python
0
0
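The Plugin, Registrar and Callback classes in the daemon code above define the contract a plugin file has to satisfy: expose a module-level registerCallbacks(reg) function, and register callables that are invoked as callback(sg, logger, event, args). The sketch below only illustrates that contract; the file name, script name/key and event filter are placeholder assumptions, not taken from the source above.

# plugins/log_status_change.py -- hypothetical example plugin for the daemon above.


def registerCallbacks(reg):
    # Plugin.load() calls this with a Registrar instance.
    # The matchEvents dict follows Callback.canProcess(): event_type -> attribute names (or "*").
    reg.registerCallback(
        "example_script_name",   # placeholder Shotgun script name
        "example_script_key",    # placeholder Shotgun script key
        logStatusChange,
        matchEvents={"Shotgun_Task_Change": ["sg_status_list"]},
        args=None,
    )
    reg.logger.info("Callbacks registered.")


def logStatusChange(sg, logger, event, args):
    # Invoked by Callback.process() as callback(shotgun_connection, logger, event, args).
    logger.info(
        "Event %d (%s) touched %s", event["id"], event["event_type"], event["attribute_name"]
    )

A wildcard filter such as {"*": ["*"]} would match every event, at the cost of the callback being dispatched for the full event stream.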
examples/custom_start.py
import asyncio import sys import logging import json from pymadoka.controller import Controller from pymadoka.connection import discover_devices, force_device_disconnect logger = logging.getLogger(__name__) async def main(madoka): try: await force_device_disconnect(madoka.connection.address) await discover_devices() await madoka.start() device_info = await madoka.read_info() logger.info(f"Device info:\n {json.dumps(device_info, default = str)}") except Exception as e: logging.error(str(e)) asyncio.get_event_loop().stop() logging.basicConfig(level=logging.DEBUG) address = sys.argv[1] madoka = Controller(address) loop = asyncio.get_event_loop() try: asyncio.ensure_future(main(madoka)) loop.run_forever() except KeyboardInterrupt: logger.info("User stopped program.") finally: logger.info("Disconnecting...") loop.run_until_complete(madoka.stop())
[]
[]
[]
[]
[]
python
null
null
null
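The examples/custom_start.py row above drives pymadoka's Controller through an explicit get_event_loop()/run_forever() loop. On Python 3.7+ the same connect-and-read sequence can be expressed with asyncio.run(); the sketch below assumes only the pymadoka calls the example itself uses, and performs a single read instead of running forever (the command-line BLE address is a placeholder).

import asyncio
import json
import logging
import sys

from pymadoka.connection import discover_devices, force_device_disconnect
from pymadoka.controller import Controller

logger = logging.getLogger(__name__)


async def main(address):
    madoka = Controller(address)
    try:
        # Same sequence as the example above: drop any stale connection,
        # scan for devices, connect, then read the device info once.
        await force_device_disconnect(madoka.connection.address)
        await discover_devices()
        await madoka.start()
        device_info = await madoka.read_info()
        logger.info("Device info:\n %s", json.dumps(device_info, default=str))
    finally:
        logger.info("Disconnecting...")
        await madoka.stop()


if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    asyncio.run(main(sys.argv[1]))  # e.g. a BLE address such as 00:11:22:33:44:55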
test/pr_sdk_test_server_test.py
# -*- coding: utf-8 -*- import unittest import os # noqa: F401 import json # noqa: F401 import time import requests from os import environ try: from ConfigParser import ConfigParser # py2 except: from configparser import ConfigParser # py3 from pprint import pprint # noqa: F401 from biokbase.workspace.client import Workspace as workspaceService from pr_sdk_test.pr_sdk_testImpl import pr_sdk_test from pr_sdk_test.pr_sdk_testServer import MethodContext from pr_sdk_test.authclient import KBaseAuth as _KBaseAuth from AssemblyUtil.AssemblyUtilClient import AssemblyUtil class pr_sdk_testTest(unittest.TestCase): @classmethod def setUpClass(cls): token = environ.get('KB_AUTH_TOKEN', None) config_file = environ.get('KB_DEPLOYMENT_CONFIG', None) cls.cfg = {} config = ConfigParser() config.read(config_file) for nameval in config.items('pr_sdk_test'): cls.cfg[nameval[0]] = nameval[1] # Getting username from Auth profile for token authServiceUrl = cls.cfg['auth-service-url'] auth_client = _KBaseAuth(authServiceUrl) user_id = auth_client.get_user(token) # WARNING: don't call any logging methods on the context object, # it'll result in a NoneType error cls.ctx = MethodContext(None) cls.ctx.update({'token': token, 'user_id': user_id, 'provenance': [ {'service': 'pr_sdk_test', 'method': 'please_never_use_it_in_production', 'method_params': [] }], 'authenticated': 1}) cls.wsURL = cls.cfg['workspace-url'] cls.wsClient = workspaceService(cls.wsURL) cls.serviceImpl = pr_sdk_test(cls.cfg) cls.scratch = cls.cfg['scratch'] cls.callback_url = os.environ['SDK_CALLBACK_URL'] @classmethod def tearDownClass(cls): if hasattr(cls, 'wsName'): cls.wsClient.delete_workspace({'workspace': cls.wsName}) print('Test workspace was deleted') def getWsClient(self): return self.__class__.wsClient def getWsName(self): if hasattr(self.__class__, 'wsName'): return self.__class__.wsName suffix = int(time.time() * 1000) wsName = "test_pr_sdk_test_" + str(suffix) ret = self.getWsClient().create_workspace({'workspace': wsName}) # noqa self.__class__.wsName = wsName return wsName def getImpl(self): return self.__class__.serviceImpl def getContext(self): return self.__class__.ctx # NOTE: According to Python unittest naming rules test method names should start from 'test'. # noqa def load_fasta_file(self, filename, obj_name, contents): f = open(filename, 'w') f.write(contents) f.close() assemblyUtil = AssemblyUtil(self.callback_url) assembly_ref = assemblyUtil.save_assembly_from_fasta({'file': {'path': filename}, 'workspace_name': self.getWsName(), 'assembly_name': obj_name }) return assembly_ref # NOTE: According to Python unittest naming rules test method names should start from 'test'. 
# noqa def test_filter_contigs_ok(self): # First load a test FASTA file as an KBase Assembly fasta_content = '>seq1 something soemthing asdf\n' \ 'agcttttcat\n' \ '>seq2\n' \ 'agctt\n' \ '>seq3\n' \ 'agcttttcatgg' assembly_ref = self.load_fasta_file(os.path.join(self.scratch, 'test1.fasta'), 'TestAssembly', fasta_content) # Second, call your implementation ret = self.getImpl().filter_contigs(self.getContext(), {'workspace_name': self.getWsName(), 'assembly_input_ref': assembly_ref, 'min_length': 10 }) # Validate the returned data self.assertEqual(ret[0]['n_initial_contigs'], 3) self.assertEqual(ret[0]['n_contigs_removed'], 1) self.assertEqual(ret[0]['n_contigs_remaining'], 2) def test_filter_contigs_err1(self): with self.assertRaises(ValueError) as errorContext: self.getImpl().filter_contigs(self.getContext(), {'workspace_name': self.getWsName(), 'assembly_input_ref': '1/fake/3', 'min_length': '-10'}) self.assertIn('min_length parameter cannot be negative', str(errorContext.exception)) def test_filter_contigs_err2(self): with self.assertRaises(ValueError) as errorContext: self.getImpl().filter_contigs(self.getContext(), {'workspace_name': self.getWsName(), 'assembly_input_ref': '1/fake/3', 'min_length': 'ten'}) self.assertIn('Cannot parse integer from min_length parameter', str(errorContext.exception))
[]
[]
[ "SDK_CALLBACK_URL" ]
[]
["SDK_CALLBACK_URL"]
python
1
0
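The two error-path tests in test/pr_sdk_test_server_test.py above pin down the messages filter_contigs is expected to raise for a bad min_length. The helper below is one plausible way to satisfy both assertions; it is a sketch under that assumption, not the actual pr_sdk_testImpl code.

def _validate_min_length(params):
    # Return min_length as a non-negative int, raising the ValueErrors the tests look for.
    raw = params.get("min_length")
    try:
        min_length = int(raw)
    except (TypeError, ValueError):
        # test_filter_contigs_err2 asserts this substring appears in the error
        raise ValueError("Cannot parse integer from min_length parameter: %s" % str(raw))
    if min_length < 0:
        # test_filter_contigs_err1 asserts this substring appears in the error
        raise ValueError("min_length parameter cannot be negative (%d)" % min_length)
    return min_length

Called at the top of filter_contigs, this would reject '-10' and 'ten' before any workspace or AssemblyUtil calls are made, which is presumably why the tests can pass a fake assembly reference such as '1/fake/3'.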