
Friday, April 5, 2013

Run EC2 Jobs with Airavata - Part III

This is a follow-up to my earlier posts [1] [2]. Here we will execute the application mentioned in [2] programmatically using Airavata.

import org.apache.airavata.commons.gfac.type.*;
import org.apache.airavata.gfac.GFacAPI;
import org.apache.airavata.gfac.GFacConfiguration;
import org.apache.airavata.gfac.GFacException;
import org.apache.airavata.gfac.context.security.AmazonSecurityContext;
import org.apache.airavata.gfac.context.ApplicationContext;
import org.apache.airavata.gfac.context.JobExecutionContext;
import org.apache.airavata.gfac.context.MessageContext;
import org.apache.airavata.schemas.gfac.*;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

import java.io.File;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;

/**
 * Your Amazon instance should be in a running state before running this test.
 */
public class EC2ProviderTest {
    private JobExecutionContext jobExecutionContext;

    private static final String hostName = "ec2-host";

    private static final String hostAddress = "ec2-address";

    private static final String sequence1 = "RR042383.21413#CTGGCACGGAGTTAGCCGATCCTTATTCATAAAGTACATGCAAACGGGTATCCATA" +
            "CTCGACTTTATTCCTTTATAAAAGAAGTTTACAACCCATAGGGCAGTCATCCTTCACGCTACTTGGCTGGTTCAGGCCTGCGCCCATTGACCAATATTCCTCA" +
            "CTGCTGCCTCCCGTAGGAGTTTGGACCGTGTCTCAGTTCCAATGTGGGGGACCTTCCTCTCAGAACCCCTATCCATCGAAGACTAGGTGGGCCGTTACCCCGC" +
            "CTACTATCTAATGGAACGCATCCCCATCGTCTACCGGAATACCTTTAATCATGTGAACATGCGGACTCATGATGCCATCTTGTATTAATCTTCCTTTCAGAAG" +
            "GCTGTCCAAGAGTAGACGGCAGGTTGGATACGTGTTACTCACCGTGCCGCCGGTCGCCATCAGTCTTAGCAAGCTAAGACCATGCTGCCCCTGACTTGCATGT" +
            "GTTAAGCCTGTAGCTTAGCGTTC";

    private static final String sequence2 = "RR042383.31934#CTGGCACGGAGTTAGCCGATCCTTATTCATAAAGTACATGCAAACGGGTATCCATA" +
            "CCCGACTTTATTCCTTTATAAAAGAAGTTTACAACCCATAGGGCAGTCATCCTTCACGCTACTTGGCTGGTTCAGGCTCTCGCCCATTGACCAATATTCCTCA" +
            "CTGCTGCCTCCCGTAGGAGTTTGGACCGTGTCTCAGTTCCAATGTGGGGGACCTTCCTCTCAGAACCCCTATCCATCGAAGACTAGGTGGGCCGTTACCCCGC" +
            "CTACTATCTAATGGAACGCATCCCCATCGTCTACCGGAATACCTTTAATCATGTGAACATGCGGACTCATGATGCCATCTTGTATTAAATCTTCCTTTCAGAA" +
            "GGCTATCCAAGAGTAGACGGCAGGTTGGATACGTGTTACTCACCGTGCG";

    /* The following variables need to be set in order to run the test. Since these are account-specific
       details, I'm not adding the values here. It's the responsibility of the person who's running the
       test to update these variables accordingly. */

    /* Username used to log into your EC2 instance, e.g. ec2-user */
    private String userName = "";

    /* Secret key used to connect to the image */
    private String secretKey = "";

    /* Access key used to connect to the image */
    private String accessKey = "";

    /* Instance id of the running instance of your image */
    private String instanceId = "";

    @Before
    public void setUp() throws Exception {
        URL resource = EC2ProviderTest.class.getClassLoader().getResource("gfac-config.xml");
        assert resource != null;
        System.out.println(resource.getFile());
        GFacConfiguration gFacConfiguration = GFacConfiguration.create(new File(resource.getPath()), null, null);

        /* EC2 Host */
        HostDescription host = new HostDescription(Ec2HostType.type);
        host.getType().setHostName(hostName);
        host.getType().setHostAddress(hostAddress);

        /* App */
        ApplicationDescription ec2Desc = new ApplicationDescription(Ec2ApplicationDeploymentType.type);
        Ec2ApplicationDeploymentType ec2App = (Ec2ApplicationDeploymentType)ec2Desc.getType();

        String serviceName = "Gnome_distance_calculation_workflow";
        ec2Desc.getType().addNewApplicationName().setStringValue(serviceName);
        ec2App.setJobType(JobTypeType.EC_2);
        ec2App.setExecutable("/home/ec2-user/run.sh");
        ec2App.setExecutableType("sh");

        /* Service */
        ServiceDescription serv = new ServiceDescription();
        serv.getType().setName("GenomeEC2");

        List<InputParameterType> inputList = new ArrayList<InputParameterType>();

        InputParameterType input1 = InputParameterType.Factory.newInstance();
        input1.setParameterName("genome_input1");
        input1.setParameterType(StringParameterType.Factory.newInstance());
        inputList.add(input1);

        InputParameterType input2 = InputParameterType.Factory.newInstance();
        input2.setParameterName("genome_input2");
        input2.setParameterType(StringParameterType.Factory.newInstance());
        inputList.add(input2);

        InputParameterType[] inputParamList = inputList.toArray(new InputParameterType[inputList.size()]);

        List<OutputParameterType> outputList = new ArrayList<OutputParameterType>();
        OutputParameterType output = OutputParameterType.Factory.newInstance();
        output.setParameterName("genome_output");
        output.setParameterType(StringParameterType.Factory.newInstance());
        outputList.add(output);

        OutputParameterType[] outputParamList = outputList
                .toArray(new OutputParameterType[outputList.size()]);

        serv.getType().setInputParametersArray(inputParamList);
        serv.getType().setOutputParametersArray(outputParamList);

        jobExecutionContext = new JobExecutionContext(gFacConfiguration, serv.getType().getName());
        ApplicationContext applicationContext = new ApplicationContext();
        jobExecutionContext.setApplicationContext(applicationContext);
        applicationContext.setServiceDescription(serv);
        applicationContext.setApplicationDeploymentDescription(ec2Desc);
        applicationContext.setHostDescription(host);

        AmazonSecurityContext amazonSecurityContext =
                new AmazonSecurityContext(userName, accessKey, secretKey, instanceId);
        jobExecutionContext.addSecurityContext(AmazonSecurityContext.AMAZON_SECURITY_CONTEXT, amazonSecurityContext);

        MessageContext inMessage = new MessageContext();
        ActualParameter genomeInput1 = new ActualParameter();
        ((StringParameterType)genomeInput1.getType()).setValue(sequence1);
        inMessage.addParameter("genome_input1", genomeInput1);

        ActualParameter genomeInput2 = new ActualParameter();
        ((StringParameterType)genomeInput2.getType()).setValue(sequence2);
        inMessage.addParameter("genome_input2", genomeInput2);

        MessageContext outMessage = new MessageContext();
        ActualParameter echo_out = new ActualParameter();
        outMessage.addParameter("genome_output", echo_out);

        jobExecutionContext.setInMessageContext(inMessage);
        jobExecutionContext.setOutMessageContext(outMessage);
    }

    @Test
    public void testEC2Provider() throws GFacException {
        GFacAPI gFacAPI = new GFacAPI();
        gFacAPI.submitJob(jobExecutionContext);
        MessageContext outMessageContext = jobExecutionContext.getOutMessageContext();
        Assert.assertEquals("476", MappingFactory.toString(
                (ActualParameter) outMessageContext.getParameter("genome_output")));
    }
}

References
[1] - http://heshans.blogspot.com/2013/04/run-ec2-jobs-with-airavata-part-i.html
[2] - http://heshans.blogspot.com/2013/04/run-ec2-jobs-with-airavata-part-ii.html 

Run EC2 Jobs with Airavata - Part II

In this post we will look at how to compose a workflow out of an application that is installed in an Amazon Machine Image (AMI). In the earlier post [1] we discussed how to do EC2 instance management using the XBaya GUI. This is the follow-up to that post.

For the Airavata EC2 integration testing, I created an AMI which contains an application that performs gene sequence alignment using the Smith-Waterman algorithm. I will be using that application as a reference in this post. You can use an application of your preference that resides in your AMI.

1. Unzip Airavata server distribution and start the server.
unzip apache-airavata-server-0.7-bin.zip
cd apache-airavata-server-0.7/bin
./airavata-server.sh

2. Unzip Airavata XBaya distribution and start XBaya.
unzip apache-airavata-xbaya-gui-0.7-bin.zip
cd apache-airavata-xbaya-gui-0.7/bin
./xbaya-gui.sh

Then you'll get the XBaya UI.


3. Select "XBaya" Menu and click "Add Host" to register an EC2 Host. Once you add the details, click   "ok".


4. You will then be prompted to enter "Airavata Registry" information. If you are using the default setup, you don't have to do any configuration. Just click "ok".


5. In order to use your application installed in the AMI, you must register it as an application in the Airavata system. Select the "XBaya" menu and click "Register Application". You will get the following dialog. Add the input parameters expected and the output parameters generated by your application.


6. Then click the "New deployment" button. You then have to select the EC2Host that you registered earlier as the Application Host. Configure the executable path to your application in your AMI and click "Add".


7. Then click "Register". If the application registration was successful, you will see the following message.


8. Now select the "Registry" menu and click "Setup Airavata Registry". Click "ok".


9. Select "XBaya" menu and click "New workflow". Then configure it accordingly.


10. Select your registered application from the "Application Services" and drag and drop it to the workflow window.


11. Drag an "Instance" component from "Amazon Components" and drop it into the workflow window. Then connect it to your application using the Control ports.


12. Click the "Instance" component's config label. Configure your instance accordingly.


13. Drag and drop two input components and one output component to the workflow from "System Components".


14. Connect the components together accordingly.


15. Now click the red "play" button to run your workflow. You will be prompted for the input values (in my case, the gene sequences) and an experiment id. Then click "Run" to execute your workflow.


16. The execution result will be shown in the XBaya GUI.


References
[1] - http://heshans.blogspot.com/2013/04/run-ec2-jobs-with-airavata-part-i.html

Run EC2 Jobs with Airavata - Part I

This will be the first of many posts that I will be doing on Apache Airavata EC2 integration. First, let's have a look at how you can use Airavata's "XBaya GUI" to manage Amazon instances.

Applies to: Airavata 0.7 and above

1. Unzip Airavata server distribution and start the server.
unzip apache-airavata-server-0.7-bin.zip
cd apache-airavata-server-0.7/bin
./airavata-server.sh
2. Unzip Airavata XBaya distribution and start XBaya.
unzip apache-airavata-xbaya-gui-0.7-bin.zip
cd apache-airavata-xbaya-gui-0.7/bin
./xbaya-gui.sh
Then you'll get the XBaya UI.


3. Select the "Amazon" menu and click "Security Credentials". Specify your secret key and access key in the security credentials dialog box and click "ok".


4. Then select the "Amazon" menu and click "EC2 Instance Management". It will show a summary of your running instances.


5. Click the "launch" button to launch new instances, and the "terminate" button to terminate running instances.


6. When you launch a new instance, it will be shown in your "Amazon EC2 Management Console".



Friday, March 15, 2013

Airavata Deployment Studio (ADS)


This is an independent study that I have been doing for Apache Airavata [1]. Airavata Deployment Studio, or simply ADS, is a platform where an Airavata user can deploy his/her Airavata deployment on a Cloud computing resource on demand. Now let's dive into ADS and the actual problem that we are trying to solve here.


What is Airavata? 


Airavata is a framework which enables a user to build Science Gateways. It is used to compose, manage, execute and monitor distributed applications and workflows on computational resources. These computational resources can range from local resources to computational grids and clouds. Therefore, various users with different backgrounds either contribute to or use Airavata in their applications.



Who uses Airavata? 

From the Airavata standpoint, three main users can be identified.


1) End Users


The End User is the one who has a model code to do some scientific application. Sometimes this End User can be a Research Scientist. He/She writes scripts to wrap the applications up, and by executing those scripts, runs the scientific workflows on supercomputers. This can be called a scientific experiment.

2) Gateway Developers


The Research Scientist is the one who comes up with the requirement of bundling scientific applications together and composing them as a workflow. The job of the Gateway Developer is to use Airavata to wrap the above-mentioned model code and scripts together. Then, scientific workflows are created out of these. In some cases, the Scientist might be the Gateway Developer as well.

3) Core Developers


The Core Developer is the one who develops and contributes to the Airavata framework code-base. The Gateway Developers use the software developed by the Core Developers to create science gateways.

Why ADS?

According to the above description, Airavata is used by different people with different technical backgrounds. Some will have in-depth knowledge of their scientific domains, like chemistry, biology, astronomy, etc., but may not have in-depth knowledge of computer science aspects such as cluster configuration, or configuring and troubleshooting VMs.

When it comes to ADS, it's targeted towards the first two types of users, as they will be the ones running into configuration issues with Airavata on their respective systems.

Sometimes we come across instances where a user might run into issues while setting up Airavata on their systems. These might be attributed to:
  1. User not following the documented steps properly.
  2. Issues in setting up the user environment. 
  3. User not being able to diagnose the issues at their end on their own.
  4. Sometimes when we try to diagnose their issue remotely, we face difficulties accessing the user's VM due to security policies defined in their system.
  5. Different security policies at the client's firewall.

Due to the above-mentioned issues, a first-time user might go away with a bad impression due to a system/VM-level issue that might not be directly related to Airavata.

What we are trying to do here is to give a first-time user a good first impression, as well as ease of configuring the Airavata ecosystem for production usage.

How? 

Now you might be wondering how ADS achieves this. ADS will use FutureGrid [3] as the underlying resource platform for this application. If you are interested in learning about what FutureGrid is, please refer to [3] for more information. ADS will ultimately become a plugin to FutureGrid's CloudMesh [4] environment.

ADS will provide a web interface which a user can use to configure his/her Airavata ecosystem. Once the configuration options are selected and the user hits the submit button, a new VM with the selected configuration will be created. The user will be able to create his/her image with the following properties (a hypothetical sketch of such a request is shown after the list).
  • Infrastructure - e.g. OpenStack, Eucalyptus, EC2, etc.
  • Architecture - e.g. 64-bit, 32-bit
  • Memory - e.g. 2GB, 4GB, 8GB, etc.
  • Operating System - e.g. Ubuntu, CentOS, Fedora, etc.
  • Java Version - e.g. Java 1.6, Java 1.7
  • Tomcat Version - e.g. Tomcat6, Tomcat7
  • Airavata Version - e.g. Airavata-0.6, Airavata-0.7
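
Since ADS is still under development, the following is a purely hypothetical Java sketch of a value object carrying the above properties; the class and field names are my own assumptions and are not part of the Airavata code-base.

/* Hypothetical illustration only: a simple value object mirroring the
   configuration options listed above. */
public class AiravataDeploymentRequest {
    public String infrastructure;    // e.g. "OpenStack", "Eucalyptus", "EC2"
    public String architecture;      // e.g. "64-bit" or "32-bit"
    public int memoryInGb;           // e.g. 2, 4, 8
    public String operatingSystem;   // e.g. "Ubuntu", "CentOS", "Fedora"
    public String javaVersion;       // e.g. "1.6", "1.7"
    public String tomcatVersion;     // e.g. "tomcat6", "tomcat7"
    public String airavataVersion;   // e.g. "airavata-0.6", "airavata-0.7"
}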

Advantages?

  1. One click install. 
  2. No need to interact with the shell to configure an Airavata environment.
  3. Deploying on various Cloud platforms based on user preference.
  4. Ease of use. 
  5. A first-time user will be able to quickly configure an instance of his own and run a sample workflow.
  6. On demand aspect.

Sneak Peek

The following screenshots show how ADS will look.

References 


Thursday, March 29, 2012

Monitor CPU Utilization of an Amazon EC2 instance using Amazon CloudWatch

The following code snippet shows how to monitor the CPU utilization of an Amazon EC2 instance using Amazon CloudWatch. In order to monitor the instance, CloudWatch monitoring should be enabled for the running instance. A short usage sketch follows the snippet.
import java.util.Date;
import java.util.List;

import com.amazonaws.AmazonServiceException;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.services.cloudwatch.AmazonCloudWatchClient;
import com.amazonaws.services.cloudwatch.model.Datapoint;
import com.amazonaws.services.cloudwatch.model.Dimension;
import com.amazonaws.services.cloudwatch.model.GetMetricStatisticsRequest;
import com.amazonaws.services.cloudwatch.model.GetMetricStatisticsResult;

// Note: 'log' is a java.util.logging.Logger defined in the enclosing class.
private double monitorInstance(AWSCredentials credential, String instanceId) {
    try {
        AmazonCloudWatchClient cw = new AmazonCloudWatchClient(credential);

        // Request hourly CPUUtilization datapoints for the given instance,
        // covering the last 24 hours.
        long offsetInMilliseconds = 1000 * 60 * 60 * 24;
        GetMetricStatisticsRequest request = new GetMetricStatisticsRequest()
                .withStartTime(new Date(new Date().getTime() - offsetInMilliseconds))
                .withNamespace("AWS/EC2")
                .withPeriod(60 * 60)
                .withDimensions(new Dimension().withName("InstanceId").withValue(instanceId))
                .withMetricName("CPUUtilization")
                .withStatistics("Average", "Maximum")
                .withEndTime(new Date());
        GetMetricStatisticsResult getMetricStatisticsResult = cw.getMetricStatistics(request);

        // Log each datapoint's average; the value returned is that of the
        // last datapoint in the list.
        double avgCPUUtilization = 0;
        List<Datapoint> dataPoints = getMetricStatisticsResult.getDatapoints();
        for (Datapoint dp : dataPoints) {
            avgCPUUtilization = dp.getAverage();
            log.info(instanceId + " instance's average CPU utilization : " + dp.getAverage());
        }

        return avgCPUUtilization;

    } catch (AmazonServiceException ase) {
        log.severe("Caught an AmazonServiceException, which means the request was made "
                + "to Amazon EC2, but was rejected with an error response for some reason.");
        log.severe("Error Message: " + ase.getMessage());
        log.severe("HTTP Status Code: " + ase.getStatusCode());
        log.severe("AWS Error Code: " + ase.getErrorCode());
        log.severe("Error Type: " + ase.getErrorType());
        log.severe("Request ID: " + ase.getRequestId());
    }
    return 0;
}
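
Here is a minimal usage sketch, assuming the AWS SDK for Java (v1) is on the classpath and that this code lives in the same class as monitorInstance. The key values and instance id are placeholders, and the monitorInstances call shown is one way to enable CloudWatch monitoring for a running instance, as required above.

import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.services.ec2.AmazonEC2Client;
import com.amazonaws.services.ec2.model.MonitorInstancesRequest;

// Placeholder credentials and instance id; replace with your own values.
AWSCredentials credentials = new BasicAWSCredentials("<access-key>", "<secret-key>");
String instanceId = "<instance-id>";

// Enable CloudWatch monitoring for the instance (a no-op if already enabled).
AmazonEC2Client ec2 = new AmazonEC2Client(credentials);
ec2.monitorInstances(new MonitorInstancesRequest().withInstanceIds(instanceId));

// Fetch and log the instance's recent CPU utilization.
double avgCpu = monitorInstance(credentials, instanceId);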

Thursday, September 30, 2010

Build WSO2 Stratos ESB from source

The following blog post discusses how to build WSO2's Stratos ESB (aka Cloud ESB) from source.

1. First, build the Carbon trunk up to the features project. Please follow my earlier post on building the WSO2 ESB from source.

2. Checkout WSO2 Stratos source.

svn co https://svn.wso2.org/repos/wso2/trunk/stratos

The following directories/files will be checked out to your file system.

.
|-- build
|-- components
|-- features
|-- pom.xml
|-- samples
|-- services
`-- setup

3. Build Stratos components project.

heshan@heshan-laptop:~/Dev/trunk/stratos/components$ mvn clean install -Dmaven.test.skip=true

4. Build Stratos features project.

heshan@heshan-laptop:~/Dev/trunk/stratos/features$ mvn clean install -Dmaven.test.skip=true

5. Build Stratos Manager.

heshan@heshan-laptop:~/Dev/trunk/stratos/services/manager$ mvn clean install -Dmaven.test.skip=true

6. Build Stratos ESB.

heshan@heshan-laptop:~/Dev/trunk/stratos/services/esb$ mvn clean install -Dmaven.test.skip=true

7. Run the setup script.

heshan@heshan-laptop:~/Dev/trunk/stratos/setup$ ./setup.sh

8. Create the necessary databases.

mysql>CREATE DATABASE stratos_db;

mysql>CREATE DATABASE WSO2CARBON_DB;

mysql>CREATE DATABASE billing;

Create users and grant the necessary user permissions.

mysql>CREATE USER 'wso2stratos'@'localhost' IDENTIFIED BY 'wso2stratos';
mysql>GRANT ALL ON *.* TO 'wso2stratos'@'localhost';

mysql>CREATE USER 'billing'@'localhost' IDENTIFIED BY 'billing';
mysql>GRANT ALL ON *.* TO 'billing'@'localhost';

Create tables.

heshan@heshan-laptop:~/Dev/trunk/stratos/services/manager/modules/distribution/target/wso2stratos-manager-1.0.0-SNAPSHOT/dbscripts$ mysql -u root -p billing < billing-mysql.sql

9. Start the Stratos Manager.
NOTE: The server should be started with the -Dsetup option the first time.

heshan@heshan-laptop:~/Dev/trunk/stratos/services/manager/modules/distribution/target/wso2stratos-manager-1.0.0-SNAPSHOT/bin$ ./wso2server.sh -Dsetup

10. Start Stratos ESB.
NOTE: The server should be started with the -Dsetup option the first time.

heshan@heshan-laptop:~/Dev/trunk/stratos/services/esb/modules/distribution/target/wso2stratos-esb-1.0.0-SNAPSHOT/bin$ ./wso2server.sh -Dsetup

11. Stop the above servers. Then start both servers again without the -Dsetup option.

heshan@heshan-laptop:~/Dev/trunk/stratos/services/manager/modules/distribution/target/wso2stratos-manager-1.0.0-SNAPSHOT/bin$ ./wso2server.sh
heshan@heshan-laptop:~/Dev/trunk/stratos/services/esb/modules/distribution/target/wso2stratos-esb-1.0.0-SNAPSHOT/bin$ ./wso2server.sh

12. Create a Tenant by logging into the Stratos Manager. Then, using the credentials obtained, you can log into the Stratos ESB.