Commit 34368aca authored by tank.li@mushiny.com

Yamaha exists as a separate branch

parent af3ea2d1
@@ -84,7 +84,12 @@ public class WMSController {
logger.debug("释放货架:" + json);
MessageDTO dto = MessageDTO.success();
try {
this.wmsService.podRelease(req);
boolean flag = this.wmsService.podRelease(req);
if(!flag){
dto.setMESSAGE("货架释放失败,请确认任务是否完成!");
dto.setCODE(1);
return ResponseEntity.ok(dto);
}
} catch (Exception e) {
dto.setMESSAGE(e.getMessage());
dto.setCODE(1);
......
package com.mushiny.heli.xnr.hardware;
import com.mushiny.heli.xnr.dto.MessageDTO;
import org.springframework.http.MediaType;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;
public class LabelAndPrinter {
@PostMapping(value = "/sendPrinter", produces = MediaType.APPLICATION_JSON_VALUE)
public ResponseEntity<MessageDTO> syncItem(
@RequestBody String json){
return null;
}
}
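Note that, as committed, LabelAndPrinter declares a @PostMapping but carries no @RestController annotation and returns null, so Spring would not register /sendPrinter and any caller would get an empty response. A minimal sketch of a wired-up version follows, assuming the MessageDTO.success() factory as used in WMSController above; the actual printer call is only a placeholder.
package com.mushiny.heli.xnr.hardware;

import com.mushiny.heli.xnr.dto.MessageDTO;
import org.springframework.http.MediaType;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RestController;

// Sketch only: @RestController is needed for the mapping below to be exposed.
@RestController
public class LabelAndPrinter {

    @PostMapping(value = "/sendPrinter", produces = MediaType.APPLICATION_JSON_VALUE)
    public ResponseEntity<MessageDTO> syncItem(@RequestBody String json) {
        // Placeholder: forward the label payload to the printer here, then acknowledge.
        return ResponseEntity.ok(MessageDTO.success());
    }
}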
package com.mushiny.heli.xnr.jdbc.config;
import com.alibaba.druid.pool.DruidDataSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.web.servlet.ServletComponentScan;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;
import javax.sql.DataSource;
/**
* @author wuweifeng wrote on 2017/10/23.
* Database connection property configuration
*/
@ServletComponentScan
@Configuration
public class DruidDBConfig {
private Logger logger = LoggerFactory.getLogger(DruidDBConfig.class);
@Value("${spring.datasource.primary.url}")
private String dbUrl1;
@Value("${spring.datasource.primary.username}")
private String username1;
@Value("${spring.datasource.primary.password}")
private String password1;
@Value("${spring.datasource.primary.driver-class-name}")
private String driverClassName1;
@Value("${spring.datasource.secondary.username}")
private String username2;
@Value("${spring.datasource.secondary.password}")
private String password2;
@Value("${spring.datasource.secondary.url}")
private String dbUrl2;
@Value("${spring.datasource.secondary.driver-class-name}")
private String driverClassName2;
@Value("5")
private int initialSize;
@Value("5")
private int minIdle;
@Value("20")
private int maxActive;
@Value("60000")
private int maxWait;
/**
* How often to run the check for idle connections that should be closed, in milliseconds
*/
@Value("60000")
private int timeBetweenEvictionRunsMillis;
/**
* Minimum time a connection must stay in the pool before it becomes evictable, in milliseconds
*/
@Value("300000")
private int minEvictableIdleTimeMillis;
@Value("SELECT 1 FROM DUAL")
private String validationQuery;
@Value("true")
private boolean testWhileIdle;
@Value("false")
private boolean testOnBorrow;
@Value("false")
private boolean testOnReturn;
/**
* Enable PSCache and set the PSCache size per connection
*/
@Value("true")
private boolean poolPreparedStatements;
@Value("20")
private int maxPoolPreparedStatementPerConnectionSize;
/**
* Filters for monitoring/statistics interception; without them the monitoring UI cannot collect SQL statistics, 'wall' is the firewall filter
*/
// @Value("stat,wall,log4j")
// private String filters;
/**
* Enable mergeSql and slow-SQL logging via the connectProperties attribute
*/
@Value("druid.stat.mergeSql=true;druid.stat.slowSqlMillis=500")
private String connectionProperties;
@Bean(name = "primaryDataSource")
@Qualifier("primaryDataSource")
public DataSource dataSource() {
return getDruidDataSource(username1, password1, dbUrl1,driverClassName1);
}
@Bean(name = "secondaryDataSource")
@Qualifier("secondaryDataSource")
@Primary
public DataSource secondaryDataSource() {
return getDruidDataSource(username2, password2, dbUrl2,driverClassName2);
}
private DruidDataSource getDruidDataSource(String username, String password, String url,String driverClassName) {
DruidDataSource datasource = new DruidDataSource();
datasource.setUrl(url);
datasource.setUsername(username);
datasource.setPassword(password);
datasource.setDriverClassName(driverClassName);
//configuration
datasource.setInitialSize(initialSize);
datasource.setMinIdle(minIdle);
datasource.setMaxActive(maxActive);
datasource.setMaxWait(maxWait);
datasource.setTimeBetweenEvictionRunsMillis(timeBetweenEvictionRunsMillis);
datasource.setMinEvictableIdleTimeMillis(minEvictableIdleTimeMillis);
datasource.setValidationQuery(validationQuery);
datasource.setTestWhileIdle(testWhileIdle);
datasource.setTestOnBorrow(testOnBorrow);
datasource.setTestOnReturn(testOnReturn);
datasource.setPoolPreparedStatements(poolPreparedStatements);
datasource.setMaxPoolPreparedStatementPerConnectionSize(maxPoolPreparedStatementPerConnectionSize);
// try {
// datasource.setFilters(filters);
// } catch (SQLException e) {
// logger.error("druid configuration initialization filter : {0}", e);
// }
datasource.setConnectionProperties(connectionProperties);
return datasource;
}
}
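The pool settings above are injected with literal @Value strings ("5", "60000", "SELECT 1 FROM DUAL", ...), so they are effectively hard-coded constants. A small sketch of the usual placeholder form is below; the property keys are assumptions, with the current literals kept as defaults.
// Sketch only (assumed property keys): lets application.properties override the
// pool sizing while the literals above remain the defaults.
@Value("${spring.datasource.initialSize:5}")
private int initialSize;
@Value("${spring.datasource.minIdle:5}")
private int minIdle;
@Value("${spring.datasource.maxActive:20}")
private int maxActive;
@Value("${spring.datasource.maxWait:60000}")
private int maxWait;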
......@@ -12,13 +12,13 @@ import javax.sql.DataSource;
/**
* Created by Tank.li on 2017/6/13.
*/
@Configuration
@ImportResource(locations={"classpath:sql.xml"})
/*@Configuration
@ImportResource(locations={"classpath:sql.xml"})*/
public class JdbcConfig {
@Autowired
//@Autowired
private Environment env;
@Bean
//@Bean
public DataSource dataSource() {
System.out.println("in init datasource");
DruidDataSource dataSource = new DruidDataSource();
......
package com.mushiny.heli.xnr.jdbc.config;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.orm.jpa.EntityManagerFactoryBuilder;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;
import org.springframework.data.jpa.repository.config.EnableJpaRepositories;
import org.springframework.orm.jpa.JpaTransactionManager;
import org.springframework.orm.jpa.LocalContainerEntityManagerFactoryBean;
import org.springframework.transaction.PlatformTransactionManager;
import org.springframework.transaction.annotation.EnableTransactionManagement;
import javax.annotation.Resource;
import javax.persistence.EntityManager;
import javax.sql.DataSource;
import java.util.Properties;
/**
* Created by Administrator on 2017/8/11.
* <p>
* Primary data source
*/
@Configuration
@EnableTransactionManagement
@EnableJpaRepositories(
entityManagerFactoryRef = "entityManagerFactoryPrimary",
transactionManagerRef = "transactionManagerPrimary",
basePackages = {"com.mushiny.heli.xnr.repository.postgres"})
public class MysqlConfig {
@Resource
@Qualifier("primaryDataSource")
private DataSource primaryDataSource;
@Primary
@Bean(name = "entityManagerPrimary")
public EntityManager entityManager(EntityManagerFactoryBuilder builder) {
return entityManagerFactoryPrimary(builder).getObject().createEntityManager();
}
@Resource
private Properties jpaProperties;
/**
* Location of the entity classes
*/
@Primary
@Bean(name = "entityManagerFactoryPrimary")
public LocalContainerEntityManagerFactoryBean entityManagerFactoryPrimary(EntityManagerFactoryBuilder builder) {
LocalContainerEntityManagerFactoryBean entityManagerFactory = builder
.dataSource(primaryDataSource)
.packages("com.mushiny.solution.zksg.entity.mysql")
.persistenceUnit("primaryPersistenceUnit")
.build();
entityManagerFactory.setJpaProperties(jpaProperties);
return entityManagerFactory;
}
@Primary
@Bean(name = "transactionManagerPrimary")
public PlatformTransactionManager transactionManagerPrimary(EntityManagerFactoryBuilder builder) {
return new JpaTransactionManager(entityManagerFactoryPrimary(builder).getObject());
}
}
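Callers that need this primary unit explicitly can select its beans by name, mirroring the @Resource/@Qualifier pattern this config already uses for the data source. A hypothetical service sketch (not part of the commit, for illustration only):
package com.mushiny.heli.xnr.jdbc.config; // hypothetical location

import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import javax.annotation.Resource;
import javax.persistence.EntityManager;

@Service
public class PrimaryUnitExampleService {

    // Picks the "entityManagerPrimary" bean defined above by qualifier.
    @Resource
    @Qualifier("entityManagerPrimary")
    private EntityManager entityManager;

    // Selects transactionManagerPrimary explicitly by bean name.
    @Transactional("transactionManagerPrimary")
    public boolean primaryUnitIsOpen() {
        // Trivial check against the primary unit's EntityManager.
        return entityManager.isOpen();
    }
}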
package com.mushiny.heli.xnr.jdbc.config;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.orm.jpa.EntityManagerFactoryBuilder;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.jpa.repository.config.EnableJpaRepositories;
import org.springframework.orm.jpa.JpaTransactionManager;
import org.springframework.orm.jpa.LocalContainerEntityManagerFactoryBean;
import org.springframework.transaction.PlatformTransactionManager;
import org.springframework.transaction.annotation.EnableTransactionManagement;
import javax.annotation.Resource;
import javax.persistence.EntityManager;
import javax.sql.DataSource;
import java.util.Properties;
/**
* Created by Administrator on 2017/8/11.
* <p>
* Secondary data source
*/
@Configuration
@EnableTransactionManagement
@EnableJpaRepositories(
entityManagerFactoryRef = "entityManagerFactorySecondary",
transactionManagerRef = "transactionManagerSecondary",
basePackages = {"com.mushiny.heli.xnr.repository.postgres"}) //设置Repository所在位置
public class PostgresConfig {
@Resource
@Qualifier("secondaryDataSource")
private DataSource secondaryDataSource;
@Resource
private Properties jpaProperties;
@Bean(name = "entityManagerSecondary")
public EntityManager entityManager(EntityManagerFactoryBuilder builder) {
return entityManagerFactorySecondary(builder).getObject().createEntityManager();
}
@Bean(name = "entityManagerFactorySecondary")
public LocalContainerEntityManagerFactoryBean entityManagerFactorySecondary(EntityManagerFactoryBuilder builder) {
LocalContainerEntityManagerFactoryBean entityManagerFactory
= builder
.dataSource(secondaryDataSource)
.packages("com.mushiny.solution.zksg.entity.orcal")//设置实体类所在位置
.persistenceUnit("secondaryPersistenceUnit")//持久化单元创建一个默认即可,多个便要分别命名
.build();
entityManagerFactory.setJpaProperties(jpaProperties);
return entityManagerFactory;
}
@Bean(name = "transactionManagerSecondary")
public PlatformTransactionManager transactionManagerPrimary(EntityManagerFactoryBuilder builder) {
return new JpaTransactionManager(entityManagerFactorySecondary(builder).getObject());
}
}
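With the @EnableJpaRepositories settings above, Spring Data repositories under com.mushiny.heli.xnr.repository.postgres are created against entityManagerFactorySecondary and transactionManagerSecondary. A hypothetical repository sketch follows; the Pod entity, its podIndex field, and its String id are assumptions for illustration, and such an entity would have to live under com.mushiny.solution.zksg.entity.orcal for this unit to manage it.
package com.mushiny.heli.xnr.repository.postgres;

import com.mushiny.solution.zksg.entity.orcal.Pod; // hypothetical entity under the scanned entity package
import org.springframework.data.jpa.repository.JpaRepository;

// Sketch only: bound to the secondary unit via the basePackages scan above.
public interface PodRepository extends JpaRepository<Pod, String> {

    // Derived query; assumes the Pod entity maps MD_POD.POD_INDEX to a podIndex property.
    Pod findByPodIndex(Integer podIndex);
}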
@@ -389,7 +389,7 @@ public class InboundService {
}
List<Map> runningPods = this.jdbcRepository.queryBySql(Sql_Table.SQL_QUERY_RUNNINGPODS_INBOUND,
WMSService.stationId, Sql_Table.AVAILABLE,Sql_Table.PROCESS);
WMSService.stationId,Sql_Table.PROCESS);
Integer limit = Integer.parseInt(pods) - runningPods.size(); //TODO distinguish by workstation
//First find this many pods, then load all of their tasks
List<Map> podIds = this.jdbcRepository.queryBySql(Sql_Table.SQL_QUERY_PODS,
......
@@ -88,7 +88,7 @@ public interface Sql_Table {
" FROM WMS_ICQA_PODORDER WHERE STATE=? AND POD_ID NOT IN " +
"(SELECT POD_ID FROM WMS_ICQA_PODORDER WHERE STATE=?) LIMIT 10";
String SQL_QUERY_RUNNINGPODS_INBOUND = "SELECT * FROM WMS_INBOUND_PODORDER WHERE WORKSTATION_ID=? " +
"AND (STATE=? OR STATE=?) LIMIT 10";
"AND STATE=? LIMIT 10";
String SQL_QUERY_RUNNINGPODS_ICQA = "SELECT * FROM WMS_ICQA_PODORDER WHERE WORKSTATION_ID=? " +
"AND (STATE=? OR STATE=?) LIMIT 10";
String SQL_QUERY_PODORDERS_INBOUND = "SELECT * FROM WMS_INBOUND_PODORDER WHERE POD_ID=? AND STATE=? ";
@@ -118,6 +118,7 @@ public interface Sql_Table {
" MD_WORKSTATION.WORKING_FACE_ORIENTATION, \n" +
" WMS_POD_STATION.TRIP_TASKID, \n" +
" WMS_POD_STATION.SECTION_ID, \n" +
" WMS_POD_STATION.STATION_ID, \n" +
" WMS_POD_STATION.PLACEMARK FROM WMS_POD_STATION, MD_WORKSTATION \n" +
" WHERE WMS_POD_STATION.PLACEMARK = MD_WORKSTATION.STOPPOINT \n" +
" AND MD_WORKSTATION.ID = ? limit 1";
@@ -175,7 +176,9 @@ public interface Sql_Table {
String WAREHOUSE = "456e94fe-127d-4861-9948-cc38760801b4";
String SQL_NEWSTORAGELOCATION = "SELECT MD_STORAGELOCATION.* FROM MD_STORAGELOCATION,INV_UNITLOAD \n" +
"WHERE MD_STORAGELOCATION.ID=INV_UNITLOAD.STORAGELOCATION_ID \n" +
"AND MD_STORAGELOCATION.POD_ID IS NOT NULL AND INV_UNITLOAD.ID NOT IN \n" +
"AND MD_STORAGELOCATION.POD_ID IS NOT NULL " +
"AND MD_STORAGELOCATION.POD_ID IN (SELECT ID FROM MD_POD)" + //有些信息不准确
"AND INV_UNITLOAD.ID NOT IN \n" +
"(SELECT UNITLOAD_ID from \n" +
"(SELECT sum(AMOUNT) as SUMALL,UNITLOAD_ID \n" +
"FROM INV_STOCKUNIT group by UNITLOAD_ID )UNITLOAD WHERE UNITLOAD.SUMALL>0) limit 1";
@@ -257,4 +260,5 @@ public interface Sql_Table {
String SQL_CHECK_INBOUND_STATUS = "SELECT 1 FROM WMS_INBOUND_ORDERPOSITION " +
"WHERE ENTRYID=? AND STATE='Available' limit 1" ;
String SQL_QUERY_PODBYINDEX = "SELECT * FROM MD_POD WHERE POD_INDEX=?";
}
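The new SQL_QUERY_PODBYINDEX constant takes a single POD_INDEX parameter and is meant to be run through the jdbcRepository helper used throughout this commit, in the same shape as the commented-out lookup in WmsToWcsService.releasePod further down. A minimal usage sketch (a fragment, assumed to sit inside a method such as releasePod):
// Fragment sketch: resolve a pod row by its numeric index, mirroring the
// commented-out block in releasePod below.
Map pod = this.jdbcRepository.queryOneBySql(Sql_Table.SQL_QUERY_PODBYINDEX, podIndex);
if (pod == null) {
    // No MD_POD row with this POD_INDEX; nothing to release.
    return false;
}
String podId = CommonUtils.parseString("ID", pod);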
@@ -351,13 +351,16 @@ public class WMSService {
/**
*
* @param req
* @return
*/
public void podRelease(Map req) {
public boolean podRelease(Map req) {
//Pod number (numeric), workstation number (ID) / SectionID
Integer podIndex = CommonUtils.parseInteger("podIndex", req);
String stationId = CommonUtils.parseString("stationName",req);
//String sectionId = CommonUtils.parseString("sectionId",req);
this.wmsToWcsService.releasePod(stationId,podIndex);
return this.wmsToWcsService.releasePod(stationId,podIndex);
//return false;
}
/**
......
@@ -142,20 +142,38 @@ public class WmsToWcsService{
@Transactional
public void releasePod(String workStationId,Integer podIndex) {
public boolean releasePod(String workStationId, Integer podIndex) {
logger.info("工作站 = {} 释放pod = {} ",workStationId,podIndex);
//先查询该停止点是否有任务
Map podStation = this.getByWorkStationIdAndPodIndex(workStationId);
if(podStation == null){
logger.info("工作站 :{} 没有pod : {} ,无法释放。。",workStationId, podIndex);
return;
return false;
}
Integer wsFace = CommonUtils.parseInteger("WORKING_FACE_ORIENTATION", podStation);
String podId = CommonUtils.parseString("POD_ID", podStation);
Integer pod_toward = CommonUtils.parseInteger("TOWARD", podStation);
String face = CommonUtils.IntegerToFace(pod_toward);
/*Map pod = this.jdbcRepository.queryOneBySql(Sql_Table.SQL_QUERY_PODBYINDEX, podIndex);
if(pod == null){
return false;
}*/
//String podId = CommonUtils.parseString("ID", pod);
List<Map> data = this.jdbcRepository.queryBySql(Sql_Table.SQL_QUERY_PODORDRTASK, podId,face);
List<Map> data2 = this.jdbcRepository.queryBySql(Sql_Table.SQL_QUERY_PODORDRTASK_ICQA, podId,face);
if(!data.isEmpty() || !data2.isEmpty()){
//There are unfinished tasks
return false;
}
String token = wmsToWcsBusiness.getTokenFromWcs();
if("".equals(token)){
logger.info("获取wcs token失败");
return;
return false;
}
token = "Bearer " + token;
Map<String, Object> taskMap = new HashMap<>();
@@ -167,8 +185,11 @@ public class WmsToWcsService{
//Update pod state
String sectionId = CommonUtils.parseString("SECTION_ID",podStation);
updatePodState(podIndex,sectionId, Sql_Table.AVAILABLE);
return false;
}
private Map getPodByIndexAndSection(Integer podIndex, String sectionId) {
return this.jdbcRepository.queryOneBySql(Sql_Table.SQL_QUERYPODBYINDEX, sectionId, podIndex);
}
......